Mirror of https://github.com/cloudreve/cloudreve.git, synced 2026-03-03 23:07:00 +00:00.

Compare commits: 3.0.0-beta ... master (746 commits)
Commit listing: 746 commits, abbreviated SHAs only (5b823305d5 at the top of the list through e34e67648f at the bottom); the author, date, and message columns are empty in this mirror snapshot.
.build/aria2.supervisor.conf (Normal file, 7 lines added)
@@ -0,0 +1,7 @@
[supervisord]
nodaemon=false

[program:background_process]
command=aria2c --enable-rpc --save-session /cloudreve/data
autostart=true
autorestart=true
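With `--enable-rpc`, the supervised aria2c process exposes a JSON-RPC interface. A minimal smoke-test sketch from inside the container follows; it assumes aria2's default RPC port (6800) and no RPC secret, neither of which this config sets explicitly.

```bash
# Hypothetical check: ask the supervised aria2 daemon for its version.
# Assumes the default RPC port 6800 and no --rpc-secret (not configured above).
curl -s http://localhost:6800/jsonrpc \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":"probe","method":"aria2.getVersion","params":[]}'
```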
.build/build-assets.sh (Executable file, 15 lines added)
@@ -0,0 +1,15 @@
#!/bin/bash
set -e
export NODE_OPTIONS="--max-old-space-size=8192"

# This script is used to build the assets for the application.
cd assets
rm -rf build
yarn install --network-timeout 1000000
yarn version --new-version $1 --no-git-tag-version
yarn run build

# Copy the build files to the application directory
cd ../
zip -r - assets/build >assets.zip
mv assets.zip application/statics
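The `.goreleaser.yaml` added later in this diff calls this script from its `before` hook as `./.build/build-assets.sh {{.Version}}`, so the single positional argument is the release version handed to `yarn version`. A manual run would look roughly like the sketch below; the version string is only an example.

```bash
# Run from the repository root; "4.0.0" is an illustrative version, not a real release.
chmod +x ./.build/build-assets.sh
./.build/build-assets.sh 4.0.0
# On success the packed frontend ends up at application/statics/assets.zip.
```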
.build/entrypoint.sh (Executable file, 2 lines added)
@@ -0,0 +1,2 @@
supervisord -c ./aria2.supervisor.conf
./cloudreve
.github/DISCUSSION_TEMPLATE/general.yml (vendored, Normal file, 19 lines added)
@@ -0,0 +1,19 @@
title: "General Discussion"
body:
  - type: checkboxes
    attributes:
      label: Self Checks
      description: "To make sure we get to you in time, please check the following :)"
      options:
        - label: I have searched for existing issues [search for existing issues](https://github.com/cloudreve/cloudreve/issues), including closed ones.
          required: true
        - label: I confirm that I am using English to submit this report, otherwise it will be closed. / 请使用英语提交,否则会被关闭。
          required: true
        - label: "Please do not modify this template :) and fill in all the required fields."
          required: true
  - type: textarea
    attributes:
      label: Content
      placeholder: Please describe the content you would like to discuss.
    validations:
      required: true
.github/DISCUSSION_TEMPLATE/ideas.yml (vendored, Normal file, 35 lines added)
@@ -0,0 +1,35 @@
title: Suggestions for New Features
body:
  - type: checkboxes
    attributes:
      label: Self Checks
      description: "To make sure we get to you in time, please check the following :)"
      options:
        - label: I have searched for existing issues [search for existing issues](https://github.com/cloudreve/cloudreve/issues), including closed ones.
          required: true
        - label: I confirm that I am using English to submit this report, otherwise it will be closed. / 请使用英语提交,否则会被关闭。
          required: true
        - label: "Please do not modify this template :) and fill in all the required fields."
          required: true
  - type: textarea
    attributes:
      label: 1. Is this request related to a challenge you're experiencing? Tell me about your story.
      placeholder: Please describe the specific scenario or problem you're facing as clearly as possible. For instance "I was trying to use [feature] for [specific task], and [what happened]... It was frustrating because...."
    validations:
      required: true
  - type: textarea
    attributes:
      label: 2. Additional context or comments
      placeholder: (Any other information, comments, documentations, links, or screenshots that would provide more clarity. This is the place to add anything else not covered above.)
    validations:
      required: false
  - type: checkboxes
    attributes:
      label: 3. Can you help us with this feature?
      description: Let us know! This is not a commitment, but a starting point for collaboration.
      options:
        - label: I am interested in contributing to this feature.
          required: false
  - type: markdown
    attributes:
      value: Please limit one request per issue.
.github/DISCUSSION_TEMPLATE/q-a.yml (vendored, Normal file, 28 lines added)
@@ -0,0 +1,28 @@
title: "Q&A"
body:
  - type: checkboxes
    attributes:
      label: Self Checks
      description: "To make sure we get to you in time, please check the following :)"
      options:
        - label: I have searched for existing issues [search for existing issues](https://github.com/cloudreve/cloudreve/issues), including closed ones.
          required: true
        - label: I confirm that I am using English to submit this report, otherwise it will be closed. / 请使用英语提交,否则会被关闭。
          required: true
        - label: "Please do not modify this template :) and fill in all the required fields."
          required: true
  - type: textarea
    attributes:
      label: 1. Is this request related to a challenge you're experiencing? Tell me about your story.
      placeholder: Please describe the specific scenario or problem you're facing as clearly as possible. For instance "I was trying to use [feature] for [specific task], and [what happened]... It was frustrating because...."
    validations:
      required: true
  - type: textarea
    attributes:
      label: 2. Additional context or comments
      placeholder: (Any other information, comments, documentations, links, or screenshots that would provide more clarity. This is the place to add anything else not covered above.)
    validations:
      required: false
  - type: markdown
    attributes:
      value: Please limit one request per issue.
.github/FUNDING.yml (vendored, Normal file, 1 line added)
@@ -0,0 +1 @@
custom: ["https://cloudreve.org/pricing"]
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, Normal file, 91 lines added)
@@ -0,0 +1,91 @@
name: "🕷️ Bug report"
description: Report errors or unexpected behavior
labels:
  - bug
body:
  - type: checkboxes
    attributes:
      label: Self Checks
      description: "To make sure we get to you in time, please check the following :)"
      options:
        - label: I have read the [Contributing Guide](https://docs.cloudreve.org/api/contributing) and [Language Policy](https://github.com/cloudreve/cloudreve/discussions/3335).
          required: true
        - label: This is only for bug report, if you would like to ask a question, please head to [Discussions](https://github.com/cloudreve/cloudreve/discussions).
          required: true
        - label: I have searched for existing issues [search for existing issues](https://github.com/cloudreve/cloudreve/issues), including closed ones.
          required: true
        - label: I confirm that I am using English to submit this report, otherwise it will be closed. / 请使用英语提交,否则会被关闭。
          required: true
        - label: "Please do not modify this template :) and fill in all the required fields."
          required: true

  - type: input
    attributes:
      label: Cloudreve version
      description: e.g. 4.14.0
    validations:
      required: true

  - type: dropdown
    attributes:
      label: Pro or Community Edition
      description: What version of Cloudreve are you using?
      multiple: true
      options:
        - Pro
        - Community (Open Source)
    validations:
      required: true

  - type: dropdown
    attributes:
      label: Database type
      description: What database are you using?
      multiple: true
      options:
        - MySQL
        - PostgreSQL
        - SQLite
        - I don't know
    validations:
      required: true

  - type: input
    attributes:
      label: Browser and operating system
      description: What browser and operating system are you using?
      placeholder: E.g. Chrome 123.0.0 on macOS 14.0.0
    validations:
      required: false

  - type: textarea
    attributes:
      label: Steps to reproduce
      description: We highly suggest including screenshots and a bug report log. Please use the right markdown syntax for code blocks.
      placeholder: Having detailed steps helps us reproduce the bug. If you have logs, please use fenced code blocks (triple backticks ```) to format them.
    validations:
      required: true

  - type: textarea
    attributes:
      label: ✔️ Expected Behavior
      description: Describe what you expected to happen.
      placeholder: What were you expecting? Please do not copy and paste the steps to reproduce here.
    validations:
      required: true

  - type: textarea
    attributes:
      label: ❌ Actual Behavior
      description: Describe what actually happened.
      placeholder: What happened instead? Please do not copy and paste the steps to reproduce here.
    validations:
      required: false

  - type: input
    attributes:
      label: Addition context information
      description: Provide any additional context information that might be helpful.
      placeholder: Any additional information that might be helpful.
    validations:
      required: false
.github/ISSUE_TEMPLATE/config.yml (vendored, Normal file, 14 lines added)
@@ -0,0 +1,14 @@
blank_issues_enabled: false
contact_links:
  - name: "\U0001F4F1 iOS App related issues"
    url: "https://github.com/cloudreve/ios-feedback/issues/new"
    about: Report issues related to the official iOS/iPadOS client.
  - name: "\U0001F5A5 Desktop client related issues"
    url: "https://github.com/cloudreve/desktop/issues/new"
    about: Report issues related to the official desktop client.
  - name: "\U0001F4AC Documentation Issues"
    url: "https://github.com/cloudreve/docs/issues/new"
    about: Report issues with the documentation, such as typos, outdated information, or missing content. Please provide the specific section and details of the issue.
  - name: "\U0001F4E7 Discussions"
    url: https://github.com/cloudreve/cloudreve/discussions
    about: General discussions and seek help from the community
.github/ISSUE_TEMPLATE/feature_request.yml (vendored, Normal file, 40 lines added)
@@ -0,0 +1,40 @@
name: "⭐ Feature or enhancement request"
description: Propose something new.
labels:
  - enhancement
body:
  - type: checkboxes
    attributes:
      label: Self Checks
      description: "To make sure we get to you in time, please check the following :)"
      options:
        - label: I have read the [Contributing Guide](https://docs.cloudreve.org/api/contributing) and [Language Policy](https://github.com/cloudreve/cloudreve/discussions/3335).
          required: true
        - label: I have searched for existing issues [search for existing issues](https://github.com/cloudreve/cloudreve/issues), including closed ones.
          required: true
        - label: I confirm that I am using English to submit this report, otherwise it will be closed. / 请使用英语提交,否则会被关闭。
          required: true
        - label: "Please do not modify this template :) and fill in all the required fields."
          required: true
  - type: textarea
    attributes:
      label: 1. Is this request related to a challenge you're experiencing? Tell me about your story.
      placeholder: Please describe the specific scenario or problem you're facing as clearly as possible. For instance "I was trying to use [feature] for [specific task], and [what happened]... It was frustrating because...."
    validations:
      required: true
  - type: textarea
    attributes:
      label: 2. Additional context or comments
      placeholder: (Any other information, comments, documentations, links, or screenshots that would provide more clarity. This is the place to add anything else not covered above.)
    validations:
      required: false
  - type: checkboxes
    attributes:
      label: 3. Can you help us with this feature?
      description: Let us know! This is not a commitment, but a starting point for collaboration.
      options:
        - label: I am interested in contributing to this feature.
          required: false
  - type: markdown
    attributes:
      value: Please limit one request per issue.
.github/stale.yml (vendored, Normal file, 61 lines added)
@@ -0,0 +1,61 @@
# Configuration for probot-stale - https://github.com/probot/stale

# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 360

# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 30

# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
onlyLabels: []

# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
  - pinned
  - security
  - "[Status] Maybe Later"

# Set to true to ignore issues in a project (defaults to false)
exemptProjects: true

# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: true

# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: true

# Label to use when marking as stale
staleLabel: wontfix

# Comment to post when marking as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.

# Comment to post when removing the stale label.
# unmarkComment: >
#   Your comment here.

# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
#   Your comment here.

# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30

# Limit to only `issues` or `pulls`
# only: issues

# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
#   daysUntilStale: 30
#   markComment: >
#     This pull request has been automatically marked as stale because it has not had
#     recent activity. It will be closed if no further activity occurs. Thank you
#     for your contributions.

# issues:
#   exemptLabels:
#     - confirmed
.gitignore (vendored, 8 lines changed)
@@ -7,6 +7,7 @@
*.db
*.bin
/release/
application/statics/assets.zip

# Test binary, build with `go test -c`
*.test
@@ -26,3 +27,10 @@ version.lock
*.ini
conf/conf.ini
/statik/
.vscode/

dist/
data/
tmp/
.devcontainer/
cloudreve
.goreleaser.yaml (Normal file, 118 lines added)
@@ -0,0 +1,118 @@
version: 2

before:
  hooks:
    - go mod tidy
    - chmod +x ./.build/build-assets.sh
    - ./.build/build-assets.sh {{.Version}}

builds:
  - env:
      - CGO_ENABLED=0

    binary: cloudreve

    ldflags:
      - -s -w
      - -X 'github.com/cloudreve/Cloudreve/v4/application/constants.BackendVersion={{.Tag}}' -X 'github.com/cloudreve/Cloudreve/v4/application/constants.LastCommit={{.ShortCommit}}'

    goos:
      - linux
      - windows
      - darwin
      - freebsd

    goarch:
      - amd64
      - arm
      - arm64
      - loong64

    goarm:
      - 5
      - 6
      - 7

    ignore:
      - goos: windows
        goarm: 5
      - goos: windows
        goarm: 6
      - goos: windows
        goarm: 7
      - goos: windows
        goarch: loong64
      - goos: freebsd
        goarch: loong64
      - goos: freebsd
        goarch: arm

archives:
  - formats: ["tar.gz"]
    # this name template makes the OS and Arch compatible with the results of uname.
    name_template: >-
      cloudreve_{{.Tag}}_{{- .Os }}_{{ .Arch }}
      {{- if .Arm }}v{{ .Arm }}{{ end }}
    # use zip for windows archives
    format_overrides:
      - goos: windows
        formats: ["zip"]

checksum:
  name_template: "checksums.txt"
snapshot:
  version_template: "{{ incpatch .Version }}-next"

changelog:
  sort: asc
  filters:
    exclude:
      - "^docs:"
      - "^test:"

release:
  draft: true
  prerelease: auto
  target_commitish: "{{ .Commit }}"
  name_template: "{{.Version}}"

dockers:
  - dockerfile: Dockerfile
    use: buildx
    build_flag_templates:
      - "--platform=linux/amd64"
      - "--provenance=false"
    goos: linux
    goarch: amd64
    goamd64: v1
    extra_files:
      - .build/aria2.supervisor.conf
      - .build/entrypoint.sh
    image_templates:
      - "cloudreve/cloudreve:{{ .Tag }}-amd64"
  - dockerfile: Dockerfile
    use: buildx
    build_flag_templates:
      - "--platform=linux/arm64"
      - "--provenance=false"
    goos: linux
    goarch: arm64
    extra_files:
      - .build/aria2.supervisor.conf
      - .build/entrypoint.sh
    image_templates:
      - "cloudreve/cloudreve:{{ .Tag }}-arm64"

docker_manifests:
  - name_template: "cloudreve/cloudreve:latest"
    image_templates:
      - "cloudreve/cloudreve:{{ .Tag }}-amd64"
      - "cloudreve/cloudreve:{{ .Tag }}-arm64"
  - name_template: "cloudreve/cloudreve:v4"
    image_templates:
      - "cloudreve/cloudreve:{{ .Tag }}-amd64"
      - "cloudreve/cloudreve:{{ .Tag }}-arm64"
  - name_template: "cloudreve/cloudreve:{{ .Tag }}"
    image_templates:
      - "cloudreve/cloudreve:{{ .Tag }}-amd64"
      - "cloudreve/cloudreve:{{ .Tag }}-arm64"
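Because the `before` hooks build the frontend and the Docker images are assembled from the prebuilt `cloudreve` binary, a local snapshot run is a convenient way to exercise this config without publishing anything. A sketch using standard GoReleaser flags, assuming GoReleaser v2 is installed and Docker is available for the image steps:

```bash
# Local dry run: builds a snapshot release using the "-next" version template above,
# writes artifacts to dist/, and does not push tags, releases, or images.
goreleaser release --snapshot --clean
```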
.travis.yml (28 lines removed)
@@ -1,28 +0,0 @@
language: go
go:
  - 1.13.x
git:
  depth: 1
install:
  - go get github.com/rakyll/statik
before_script:
  - statik -src=models -f
script:
  - go test -coverprofile=coverage.txt -covermode=atomic ./...
after_success:
  - bash <(curl -s https://codecov.io/bash)
before_deploy:
  - sudo apt-get update
  - sudo apt-get -y install gcc-mingw-w64-x86-64
  - sudo apt-get -y install gcc-arm-linux-gnueabihf libc6-dev-armhf-cross
  - chmod +x ./build.sh
  - ./build.sh -r b
deploy:
  provider: releases
  api_key: $GITHUB_TOKEN
  file_glob: true
  file: release/*
  draft: true
  skip_cleanup: true
  on:
    tags: true
Dockerfile (Normal file, 30 lines added)
@@ -0,0 +1,30 @@
FROM alpine:latest

WORKDIR /cloudreve

RUN apk update \
    && apk add --no-cache tzdata vips-tools ffmpeg libreoffice aria2 supervisor font-noto font-noto-cjk libheif libraw-tools\
    && cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
    && echo "Asia/Shanghai" > /etc/timezone \
    && mkdir -p ./data/temp/aria2 \
    && chmod -R 766 ./data/temp/aria2

ENV CR_ENABLE_ARIA2=1 \
    CR_SETTING_DEFAULT_thumb_ffmpeg_enabled=1 \
    CR_SETTING_DEFAULT_thumb_vips_enabled=1 \
    CR_SETTING_DEFAULT_thumb_libreoffice_enabled=1 \
    CR_SETTING_DEFAULT_media_meta_ffprobe=1 \
    CR_SETTING_DEFAULT_thumb_libraw_enabled=1

COPY .build/aria2.supervisor.conf .build/entrypoint.sh ./
COPY cloudreve ./cloudreve

RUN chmod +x ./cloudreve \
    && chmod +x ./entrypoint.sh

EXPOSE 5212 443

VOLUME ["/cloudreve/data"]

ENTRYPOINT ["sh", "./entrypoint.sh"]
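For completeness, a hedged example of running the resulting image: the container name, host port, and host data directory below are placeholders, while port 5212, the `/cloudreve/data` volume, and the `cloudreve/cloudreve:latest` tag come from the Dockerfile and the goreleaser manifests above.

```bash
# Illustrative only: container name, host path, and port mapping are placeholders.
docker run -d \
  --name cloudreve \
  -p 5212:5212 \
  -v /opt/cloudreve/data:/cloudreve/data \
  cloudreve/cloudreve:latest
```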
LICENSE (Normal file, 674 lines added)
@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
(The file is the standard, unmodified GPL-3.0 license text; this excerpt of the diff breaks off partway through section 9, "Acceptance Not Required for Having Copies.")
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
||||
README.md (78 lines changed)
@@ -1,2 +1,76 @@
# Backend-V3 [](https://travis-ci.com/HFO4/Backend-V3) [](https://codecov.io/gh/HFO4/Backend-V3)
Still in development
[中文版本](https://github.com/cloudreve/cloudreve/blob/master/README_zh-CN.md)

<h1 align="center">
  <br>
  <a href="https://cloudreve.org/" alt="logo" ><img src="https://raw.githubusercontent.com/cloudreve/frontend/master/public/static/img/logo192.png" width="150"/></a>
  <br>
  Cloudreve
  <br>
</h1>
<h4 align="center">Self-hosted file management system with multi-cloud support.</h4>

<p align="center">
  <a href="https://dev.azure.com/abslantliu/cloudreve/_build?definitionId=6">
    <img src="https://img.shields.io/github/check-runs/cloudreve/cloudreve/master"
         alt="Azure pipelines">
  </a>
  <a href="https://github.com/cloudreve/cloudreve/releases">
    <img src="https://img.shields.io/github/v/release/cloudreve/cloudreve?include_prereleases" />
  </a>
  <a href="https://github.com/cloudreve/cloudreve/releases">
    <img src="https://badgen.net/static/release%20size/34%20MB/blue"/>
  </a>
  <a href="https://hub.docker.com/r/cloudreve/cloudreve">
    <img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/cloudreve/cloudreve" />
  </a>
</p>
<p align="center">
  <a href="https://cloudreve.org">Homepage</a> •
  <a href="https://demo.cloudreve.org">Try it</a> •
  <a href="https://github.com/cloudreve/cloudreve/discussions">Discussion</a> •
  <a href="https://docs.cloudreve.org">Documents</a> •
  <a href="https://github.com/cloudreve/cloudreve/releases">Download</a> •
  <a href="https://t.me/cloudreve_official">Telegram</a> •
  <a href="https://discord.com/invite/WTpMFpZT76">Discord</a>
</p>

## :sparkles: Features

- :cloud: Store files on local storage, remote nodes, OneDrive, S3-compatible APIs, Qiniu Kodo, Aliyun OSS, Tencent COS, Huawei Cloud OBS, Kingsoft Cloud KS3, and Upyun.
- :outbox_tray: Upload and download files directly between the client and the storage provider.
- 💾 Integrate with Aria2/qBittorrent to download files in the background; multiple download nodes can share the load.
- 📚 Compress/extract/preview archived files, download files in batch.
- 💻 WebDAV support covering all storage providers.
- :zap: Drag & drop to upload files or folders, with parallel resumable upload support.
- :card_file_box: Extract media metadata from files, search files by metadata or tags.
- :family_woman_girl_boy: Multiple users with multiple groups.
- :link: Create share links for files and folders with expiration dates.
- :eye_speech_bubble: Preview videos, images, audio, and ePub files online; edit text, diagrams, Markdown, images, and Office documents online.
- :art: Customizable theme colors, dark mode, PWA application, SPA, i18n.
- :rocket: All-in-one packaging, with all features out of the box.
- 🌈 ... ...

## :hammer_and_wrench: Deploy

To deploy Cloudreve, you can refer to [Getting started](https://docs.cloudreve.org/overview/quickstart) for a quick local deployment to test.

When you're ready to deploy Cloudreve to a production environment, you can refer to [Deploy](https://docs.cloudreve.org/overview/deploy/) for a complete deployment.

## :gear: Build

Please refer to [Build](https://docs.cloudreve.org/overview/build/) for how to build Cloudreve from source code.

## :rocket: Contributing

If you're interested in contributing to Cloudreve, please refer to [Contributing](https://docs.cloudreve.org/api/contributing/) for how to contribute to Cloudreve.

## :alembic: Stacks

- [Go](https://golang.org/) + [Gin](https://github.com/gin-gonic/gin) + [ent](https://github.com/ent/ent)
- [React](https://github.com/facebook/react) + [Redux](https://github.com/reduxjs/redux) + [Material-UI](https://github.com/mui-org/material-ui)

## :scroll: License

GPL V3

README_zh-CN.md (new file, 77 lines)
@@ -0,0 +1,77 @@
[English Version](https://github.com/cloudreve/cloudreve/blob/master/README.md)

<h1 align="center">
  <br>
  <a href="https://cloudreve.org/" alt="logo" ><img src="https://raw.githubusercontent.com/cloudreve/frontend/master/public/static/img/logo192.png" width="150"/></a>
  <br>
  Cloudreve
  <br>
</h1>

<h4 align="center">A public-cloud file system supporting multiple cloud storage drivers.</h4>

<p align="center">
  <a href="https://dev.azure.com/abslantliu/cloudreve/_build?definitionId=6">
    <img src="https://img.shields.io/github/check-runs/cloudreve/cloudreve/master"
         alt="Azure pipelines">
  </a>
  <a href="https://github.com/cloudreve/cloudreve/releases">
    <img src="https://img.shields.io/github/v/release/cloudreve/cloudreve?include_prereleases" />
  </a>
  <a href="https://github.com/cloudreve/cloudreve/releases">
    <img src="https://badgen.net/static/release%20size/34%20MB/blue"/>
  </a>
  <a href="https://hub.docker.com/r/cloudreve/cloudreve">
    <img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/cloudreve/cloudreve" />
  </a>
</p>
<p align="center">
  <a href="https://cloudreve.org">Homepage</a> •
  <a href="https://demo.cloudreve.org">Demo</a> •
  <a href="https://github.com/cloudreve/cloudreve/discussions">Discussion</a> •
  <a href="https://docs.cloudreve.org">Documents</a> •
  <a href="https://github.com/cloudreve/cloudreve/releases">Download</a> •
  <a href="https://t.me/cloudreve_official">Telegram</a> •
  <a href="https://discord.com/invite/WTpMFpZT76">Discord</a>
</p>

## :sparkles: Features

- :cloud: Supports local storage, slave nodes, Qiniu Kodo, Aliyun OSS, Tencent COS, Huawei Cloud OBS, Kingsoft Cloud KS3, Upyun, OneDrive (including the 21Vianet edition), and S3-compatible APIs as storage backends
- :outbox_tray: Upload/download with direct client-to-storage transfer, with download speed limiting
- 💾 Integrates with Aria2/qBittorrent for offline downloads; multiple slave nodes can share download tasks
- 📚 Online compression/extraction, archive preview, and batch download of multiple files
- 💻 WebDAV support covering all storage policies
- :zap: Drag-and-drop upload, directory upload, parallel chunked upload
- :card_file_box: Extract media metadata and search files by metadata or tags
- :family_woman_girl_boy: Multiple users, user groups, multiple storage policies
- :link: Create share links for files and directories with automatic expiration
- :eye_speech_bubble: Online preview of videos, images, audio, and ePub files; online editing of text and Office documents
- :art: Custom color schemes, dark mode, PWA, site-wide single-page application, i18n
- :rocket: All-in-one packaging, ready out of the box
- 🌈 ... ...

## :hammer_and_wrench: Deploy

You can refer to [Getting started](https://docs.cloudreve.org/overview/quickstart) to spin up a local instance for trial and testing.

When you are ready to deploy Cloudreve to a production environment, refer to [Deploy](https://docs.cloudreve.org/overview/deploy/) for a complete deployment.

## :gear: Build

Refer to [Build](https://docs.cloudreve.org/overview/build/) to build Cloudreve from source code.

## :rocket: Contributing

If you are interested in contributing code to Cloudreve, see [Contributing](https://docs.cloudreve.org/api/contributing/).

## :alembic: Stacks

- [Go](https://golang.org/) + [Gin](https://github.com/gin-gonic/gin) + [ent](https://github.com/ent/ent)
- [React](https://github.com/facebook/react) + [Redux](https://github.com/reduxjs/redux) + [Material-UI](https://github.com/mui-org/material-ui)

## :scroll: License

GPL V3

SECURITY.md (new file, 12 lines)
@@ -0,0 +1,12 @@
# Security Policy

## Supported Versions

* For high-impact security issues (e.g. those related to payments or user permissions), we support 3.8.x and all 4.x versions, but fixes for 4.x are released only in the latest minor version.
* For all other security issues, we mainly support versions >= 4.x (where `x` is the latest stable minor version).

## Reporting a Vulnerability

Please send the details of the security issue to `support@cloudreve.org`. Once the vulnerability is confirmed or fixed, you will receive updates in the email thread.

We will reward you with a bounty/swag for successful submissions of security issues.

application/application.go (new file, 247 lines)
@@ -0,0 +1,247 @@
package application

import (
    "context"
    "errors"
    "fmt"
    "net"
    "net/http"
    _ "net/http/pprof"
    "os"
    "time"

    "github.com/cloudreve/Cloudreve/v4/application/constants"
    "github.com/cloudreve/Cloudreve/v4/application/dependency"
    "github.com/cloudreve/Cloudreve/v4/ent"
    "github.com/cloudreve/Cloudreve/v4/pkg/cache"
    "github.com/cloudreve/Cloudreve/v4/pkg/conf"
    "github.com/cloudreve/Cloudreve/v4/pkg/crontab"
    "github.com/cloudreve/Cloudreve/v4/pkg/email"
    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/onedrive"
    "github.com/cloudreve/Cloudreve/v4/pkg/logging"
    "github.com/cloudreve/Cloudreve/v4/pkg/setting"
    "github.com/cloudreve/Cloudreve/v4/pkg/util"
    "github.com/cloudreve/Cloudreve/v4/routers"
    "github.com/gin-gonic/gin"
)

type Server interface {
    // Start starts the Cloudreve server.
    Start() error
    PrintBanner()
    Close()
}

// NewServer constructs a new Cloudreve server instance with the given dependency.
func NewServer(dep dependency.Dep) Server {
    return &server{
        dep:    dep,
        logger: dep.Logger(),
        config: dep.ConfigProvider(),
    }
}

type server struct {
    dep         dependency.Dep
    logger      logging.Logger
    dbClient    *ent.Client
    config      conf.ConfigProvider
    server      *http.Server
    pprofServer *http.Server
    kv          cache.Driver
    mailQueue   email.Driver
}

func (s *server) PrintBanner() {
    fmt.Print(`
   ___ _                 _
  / __\ | ___  _   _  __| |_ __ _____   _____
 / /  | |/ _ \| | | |/ _ | '__/ _ \ \ / / _ \
/ /___| | (_) | |_| | (_| | | |  __/\ V /  __/
\____/|_|\___/ \__,_|\__,_|_|  \___| \_/ \___|

V` + constants.BackendVersion + ` Commit #` + constants.LastCommit + ` Pro=` + constants.IsPro + `
================================================

`)
}

func (s *server) Start() error {
    // Switch to production mode when debug is turned off
    if !s.config.System().Debug {
        gin.SetMode(gin.ReleaseMode)
    }

    s.kv = s.dep.KV()
    // Delete all cached settings
    _ = s.kv.Delete(setting.KvSettingPrefix)
    if memKv, ok := s.kv.(*cache.MemoStore); ok {
        memKv.GarbageCollect(s.logger)
    }

    // TODO: make sure redis is connected in dep before user traffic.
    if s.config.System().Mode == conf.MasterMode {
        s.dbClient = s.dep.DBClient()
        // TODO: make sure all dep is initialized before server start.
        s.dep.LockSystem()
        s.dep.UAParser()

        // Initialize OneDrive credentials
        credentials, err := onedrive.RetrieveOneDriveCredentials(context.Background(), s.dep.StoragePolicyClient())
        if err != nil {
            return fmt.Errorf("failed to retrieve OneDrive credentials for CredManager: %w", err)
        }
        if err := s.dep.CredManager().Upsert(context.Background(), credentials...); err != nil {
            return fmt.Errorf("failed to upsert OneDrive credentials to CredManager: %w", err)
        }
        crontab.Register(setting.CronTypeOauthCredRefresh, func(ctx context.Context) {
            dep := dependency.FromContext(ctx)
            cred := dep.CredManager()
            cred.RefreshAll(ctx)
        })

        // Initialize email queue before user traffic starts.
        _ = s.dep.EmailClient(context.Background())

        // Start all queues
        s.dep.MediaMetaQueue(context.Background()).Start()
        s.dep.EntityRecycleQueue(context.Background()).Start()
        s.dep.IoIntenseQueue(context.Background()).Start()
        s.dep.RemoteDownloadQueue(context.Background()).Start()

        // Start cron jobs
        c, err := crontab.NewCron(context.Background(), s.dep)
        if err != nil {
            return err
        }
        c.Start()

        // Start node pool
        if _, err := s.dep.NodePool(context.Background()); err != nil {
            return err
        }
    } else {
        s.dep.SlaveQueue(context.Background()).Start()
    }
    s.dep.ThumbQueue(context.Background()).Start()

    api := routers.InitRouter(s.dep)
    api.TrustedPlatform = s.config.System().ProxyHeader
    s.server = &http.Server{Handler: api}

    // Start pprof server if configured
    if pprofAddr := s.config.System().Pprof; pprofAddr != "" {
        s.pprofServer = &http.Server{
            Addr:    pprofAddr,
            Handler: http.DefaultServeMux,
        }
        go func() {
            s.logger.Info("pprof server listening on %q", pprofAddr)
            if err := s.pprofServer.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
                s.logger.Error("pprof server error: %s", err)
            }
        }()
    }

    // If SSL is enabled
    if s.config.SSL().CertPath != "" {
        s.logger.Info("Listening to %q", s.config.SSL().Listen)
        s.server.Addr = s.config.SSL().Listen
        if err := s.server.ListenAndServeTLS(s.config.SSL().CertPath, s.config.SSL().KeyPath); err != nil && !errors.Is(err, http.ErrServerClosed) {
            return fmt.Errorf("failed to listen to %q: %w", s.config.SSL().Listen, err)
        }

        return nil
    }

    // If a Unix socket is configured
    if s.config.Unix().Listen != "" {
        // Delete the socket file before listening
        if _, err := os.Stat(s.config.Unix().Listen); err == nil {
            if err = os.Remove(s.config.Unix().Listen); err != nil {
                return fmt.Errorf("failed to delete socket file %q: %w", s.config.Unix().Listen, err)
            }
        }

        s.logger.Info("Listening to %q", s.config.Unix().Listen)
        if err := s.runUnix(s.server); err != nil && !errors.Is(err, http.ErrServerClosed) {
            return fmt.Errorf("failed to listen to %q: %w", s.config.Unix().Listen, err)
        }

        return nil
    }

    s.logger.Info("Listening to %q", s.config.System().Listen)
    s.server.Addr = s.config.System().Listen
    if err := s.server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
        return fmt.Errorf("failed to listen to %q: %w", s.config.System().Listen, err)
    }
    return nil
}

func (s *server) Close() {
    if s.dbClient != nil {
        s.logger.Info("Shutting down database connection...")
        if err := s.dbClient.Close(); err != nil {
            s.logger.Error("Failed to close database connection: %s", err)
        }
    }

    ctx := context.Background()
    if conf.SystemConfig.GracePeriod != 0 {
        var cancel context.CancelFunc
        ctx, cancel = context.WithTimeout(ctx, time.Duration(s.config.System().GracePeriod)*time.Second)
        defer cancel()
    }

    s.dep.EventHub().Close()

    // Shutdown http server
    if s.server != nil {
        err := s.server.Shutdown(ctx)
        if err != nil {
            s.logger.Error("Failed to shutdown server: %s", err)
        }
    }

    // Shutdown pprof server
    if s.pprofServer != nil {
        if err := s.pprofServer.Shutdown(ctx); err != nil {
            s.logger.Error("Failed to shutdown pprof server: %s", err)
        }
    }

    if s.kv != nil {
        if err := s.kv.Persist(util.DataPath(cache.DefaultCacheFile)); err != nil {
            s.logger.Warning("Failed to persist cache: %s", err)
        }
    }

    if err := s.dep.Shutdown(ctx); err != nil {
        s.logger.Warning("Failed to shutdown dependency manager: %s", err)
    }
}

func (s *server) runUnix(server *http.Server) error {
    listener, err := net.Listen("unix", s.config.Unix().Listen)
    if err != nil {
        return err
    }

    defer listener.Close()
    defer os.Remove(s.config.Unix().Listen)

    if conf.UnixConfig.Perm > 0 {
        err = os.Chmod(conf.UnixConfig.Listen, os.FileMode(s.config.Unix().Perm))
        if err != nil {
            s.logger.Warning(
                "Failed to set permission to %q for socket file %q: %s",
                s.config.Unix().Perm,
                s.config.Unix().Listen,
                err,
            )
        }
    }

    return server.Serve(listener)
}

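The lifecycle above is: build the dependency container, construct a Server, print the banner, block in Start, and call Close on shutdown. Below is a minimal sketch of a caller driving that lifecycle; the constructor name dependency.NewDependency is an assumption, since the real constructor lives in application/dependency/dependency.go, whose diff is suppressed further down.

package main

import (
    "os"
    "os/signal"
    "syscall"

    "github.com/cloudreve/Cloudreve/v4/application"
    "github.com/cloudreve/Cloudreve/v4/application/dependency"
)

func main() {
    // NewDependency is assumed; the options themselves (WithConfigPath,
    // WithProFlag) are real and defined in application/dependency/options.go.
    dep := dependency.NewDependency(
        dependency.WithConfigPath("conf.ini"),
        dependency.WithProFlag(false),
    )

    srv := application.NewServer(dep)
    srv.PrintBanner()

    // Shut down gracefully on SIGINT/SIGTERM; Close honors the configured
    // GracePeriod when stopping the HTTP server.
    sigs := make(chan os.Signal, 1)
    signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
    go func() {
        <-sigs
        srv.Close()
        os.Exit(0)
    }()

    if err := srv.Start(); err != nil {
        dep.Logger().Error("Server error: %s", err)
    }
}
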
application/constants/constants.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package constants

// These values will be injected at build time, DO NOT EDIT.

// BackendVersion is the current backend version.
var BackendVersion = "4.14.0"

// IsPro indicates whether this is the Pro edition.
var IsPro = "false"

var IsProBool = IsPro == "true"

// LastCommit is the id of the last commit.
var LastCommit = "000000"

const (
    APIPrefix      = "/api/v4"
    APIPrefixSlave = "/api/v4/slave"
    CrHeaderPrefix = "X-Cr-"
)

const CloudreveScheme = "cloudreve"

type (
    FileSystemType string
)

const (
    FileSystemMy           = FileSystemType("my")
    FileSystemShare        = FileSystemType("share")
    FileSystemTrash        = FileSystemType("trash")
    FileSystemSharedWithMe = FileSystemType("shared_with_me")
    FileSystemUnknown      = FileSystemType("unknown")
)

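Since these are plain string variables, the "injected at build time" note refers to the Go linker's -X flag, which overwrites package-level string vars at link time. A small sketch of reading the stamped values; the exact build command used by the project's CI is not shown in this diff, so the one in the comment is an assumed form.

// Assumed build invocation (shown as a comment because the real CI
// command is not part of this diff):
//
//   go build -ldflags "-X github.com/cloudreve/Cloudreve/v4/application/constants.LastCommit=$(git rev-parse --short HEAD)"
package main

import (
    "fmt"

    "github.com/cloudreve/Cloudreve/v4/application/constants"
)

func main() {
    // Prints the same identity line the server banner uses.
    fmt.Printf("V%s Commit #%s Pro=%s\n",
        constants.BackendVersion, constants.LastCommit, constants.IsPro)
}
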
application/constants/size.go (new file, 8 lines)
@@ -0,0 +1,8 @@
package constants

const (
    MB = 1 << 20
    GB = 1 << 30
    TB = 1 << 40
    PB = 1 << 50
)

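The shifts expand to the usual binary units (MB = 1,048,576 bytes, GB = 1,073,741,824, and so on). A small hypothetical helper, not part of the repository, showing the intended usage:

package main

import (
    "fmt"

    "github.com/cloudreve/Cloudreve/v4/application/constants"
)

// humanSize renders a byte count using the largest fitting unit.
func humanSize(n int64) string {
    switch {
    case n >= constants.TB:
        return fmt.Sprintf("%.2f TB", float64(n)/float64(constants.TB))
    case n >= constants.GB:
        return fmt.Sprintf("%.2f GB", float64(n)/float64(constants.GB))
    case n >= constants.MB:
        return fmt.Sprintf("%.2f MB", float64(n)/float64(constants.MB))
    default:
        return fmt.Sprintf("%d B", n)
    }
}

func main() {
    fmt.Println(humanSize(5 * constants.GB)) // "5.00 GB"
}
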
application/dependency/dependency.go (new file, 1011 lines)
File diff suppressed because it is too large
application/dependency/options.go (new file, 175 lines)
@@ -0,0 +1,175 @@
package dependency

import (
    "io/fs"

    "github.com/cloudreve/Cloudreve/v4/ent"
    "github.com/cloudreve/Cloudreve/v4/inventory"
    "github.com/cloudreve/Cloudreve/v4/pkg/auth"
    "github.com/cloudreve/Cloudreve/v4/pkg/cache"
    "github.com/cloudreve/Cloudreve/v4/pkg/conf"
    "github.com/cloudreve/Cloudreve/v4/pkg/email"
    "github.com/cloudreve/Cloudreve/v4/pkg/hashid"
    "github.com/cloudreve/Cloudreve/v4/pkg/logging"
    "github.com/cloudreve/Cloudreve/v4/pkg/searcher"
    "github.com/cloudreve/Cloudreve/v4/pkg/setting"
    "github.com/gin-contrib/static"
)

// Option customizes the construction of a dependency instance.
type Option interface {
    apply(*dependency)
}

type optionFunc func(*dependency)

func (f optionFunc) apply(o *dependency) {
    f(o)
}

// WithConfigPath sets the path of the config file.
func WithConfigPath(p string) Option {
    return optionFunc(func(o *dependency) {
        o.configPath = p
    })
}

// WithLogger sets the default logger.
func WithLogger(l logging.Logger) Option {
    return optionFunc(func(o *dependency) {
        o.logger = l
    })
}

// WithConfigProvider sets the default config provider.
func WithConfigProvider(c conf.ConfigProvider) Option {
    return optionFunc(func(o *dependency) {
        o.configProvider = c
    })
}

// WithStatics sets the default statics FS.
func WithStatics(c fs.FS) Option {
    return optionFunc(func(o *dependency) {
        o.statics = c
    })
}

// WithServerStaticFS sets the default static FS for the server.
func WithServerStaticFS(c static.ServeFileSystem) Option {
    return optionFunc(func(o *dependency) {
        o.serverStaticFS = c
    })
}

// WithProFlag sets whether the current instance is a Pro version.
func WithProFlag(c bool) Option {
    return optionFunc(func(o *dependency) {
        o.isPro = c
    })
}

// WithRawEntClient sets the default raw ent client.
func WithRawEntClient(c *ent.Client) Option {
    return optionFunc(func(o *dependency) {
        o.rawEntClient = c
    })
}

// WithDbClient sets the default ent client.
func WithDbClient(c *ent.Client) Option {
    return optionFunc(func(o *dependency) {
        o.dbClient = c
    })
}

// WithRequiredDbVersion sets the required db version.
func WithRequiredDbVersion(c string) Option {
    return optionFunc(func(o *dependency) {
        o.requiredDbVersion = c
    })
}

// WithKV sets the default KV store driver.
func WithKV(c cache.Driver) Option {
    return optionFunc(func(o *dependency) {
        o.kv = c
    })
}

// WithSettingClient sets the default setting client.
func WithSettingClient(s inventory.SettingClient) Option {
    return optionFunc(func(o *dependency) {
        o.settingClient = s
    })
}

// WithSettingProvider sets the default setting provider.
func WithSettingProvider(s setting.Provider) Option {
    return optionFunc(func(o *dependency) {
        o.settingProvider = s
    })
}

// WithUserClient sets the default user client.
func WithUserClient(s inventory.UserClient) Option {
    return optionFunc(func(o *dependency) {
        o.userClient = s
    })
}

// WithEmailClient sets the default email client.
func WithEmailClient(s email.Driver) Option {
    return optionFunc(func(o *dependency) {
        o.emailClient = s
    })
}

// WithGeneralAuth sets the default general auth.
func WithGeneralAuth(s auth.Auth) Option {
    return optionFunc(func(o *dependency) {
        o.generalAuth = s
    })
}

// WithHashIDEncoder sets the default hash id encoder.
func WithHashIDEncoder(s hashid.Encoder) Option {
    return optionFunc(func(o *dependency) {
        o.hashidEncoder = s
    })
}

// WithTokenAuth sets the default token auth.
func WithTokenAuth(s auth.TokenAuth) Option {
    return optionFunc(func(o *dependency) {
        o.tokenAuth = s
    })
}

// WithFileClient sets the default file client.
func WithFileClient(s inventory.FileClient) Option {
    return optionFunc(func(o *dependency) {
        o.fileClient = s
    })
}

// WithShareClient sets the default share client.
func WithShareClient(s inventory.ShareClient) Option {
    return optionFunc(func(o *dependency) {
        o.shareClient = s
    })
}

// WithSearchIndexer sets the default search indexer.
func WithSearchIndexer(s searcher.SearchIndexer) Option {
    return optionFunc(func(o *dependency) {
        o.searchIndexer = s
    })
}

// WithTextExtractor sets the default text extractor.
func WithTextExtractor(s searcher.TextExtractor) Option {
    return optionFunc(func(o *dependency) {
        o.textExtractor = s
    })
}

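Every With* helper above is an instance of the functional-options pattern: a closure that sets exactly one private field of the dependency struct, so callers configure only what they care about. The consuming side lives in the suppressed dependency.go; under the assumption that it follows the usual shape of this pattern, it would look roughly like this (only Option and optionFunc are confirmed by the code above):

// Assumed sketch of the constructor in dependency.go (suppressed in this
// diff); the name and defaults are illustrative.
func NewDependency(opts ...Option) Dep {
    d := &dependency{}
    // Apply caller-supplied overrides on top of zero-value defaults.
    for _, o := range opts {
        o.apply(d)
    }
    return d
}
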
application/migrator/avatars.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package migrator

import (
    "fmt"
    "io"
    "os"
    "path/filepath"

    "github.com/cloudreve/Cloudreve/v4/pkg/util"
)

func migrateAvatars(m *Migrator) error {
    m.l.Info("Migrating avatar files...")
    avatarRoot := util.RelativePath(m.state.V3AvatarPath)

    for uid := range m.state.UserIDs {
        avatarPath := filepath.Join(avatarRoot, fmt.Sprintf("avatar_%d_2.png", uid))

        // Check if the file exists
        if util.Exists(avatarPath) {
            m.l.Info("Migrating avatar for user %d", uid)
            // Copy to the v4 avatar path
            v4Path := filepath.Join(util.DataPath("avatar"), fmt.Sprintf("avatar_%d.png", uid))

            origin, err := os.Open(avatarPath)
            if err != nil {
                return fmt.Errorf("failed to open avatar file: %w", err)
            }

            dest, err := util.CreatNestedFile(v4Path)
            if err != nil {
                origin.Close()
                return fmt.Errorf("failed to create avatar file: %w", err)
            }

            if _, err = io.Copy(dest, origin); err != nil {
                m.l.Warning("Failed to copy avatar file: %s, skipping...", err)
            }

            // Close explicitly rather than defer: a defer inside the loop
            // would keep every file open until the function returns.
            origin.Close()
            dest.Close()
        }
    }

    return nil
}

application/migrator/conf/conf.go (new file, 124 lines)
@@ -0,0 +1,124 @@
package conf

import (
    "github.com/cloudreve/Cloudreve/v4/pkg/logging"
    "github.com/go-ini/ini"
    "github.com/go-playground/validator/v10"
)

// database holds database settings.
type database struct {
    Type        string
    User        string
    Password    string
    Host        string
    Name        string
    TablePrefix string
    DBFile      string
    Port        int
    Charset     string
    UnixSocket  bool
}

// system holds general system settings.
type system struct {
    Mode          string `validate:"eq=master|eq=slave"`
    Listen        string `validate:"required"`
    Debug         bool
    SessionSecret string
    HashIDSalt    string
    GracePeriod   int `validate:"gte=0"`
    ProxyHeader   string
}

type ssl struct {
    CertPath string `validate:"omitempty,required"`
    KeyPath  string `validate:"omitempty,required"`
    Listen   string `validate:"required"`
}

type unix struct {
    Listen string
    Perm   uint32
}

// slave holds the settings used when running as a slave storage node.
type slave struct {
    Secret          string `validate:"omitempty,gte=64"`
    CallbackTimeout int    `validate:"omitempty,gte=1"`
    SignatureTTL    int    `validate:"omitempty,gte=1"`
}

// redis holds Redis settings.
type redis struct {
    Network  string
    Server   string
    User     string
    Password string
    DB       string
}

// cors holds cross-origin settings.
type cors struct {
    AllowOrigins     []string
    AllowMethods     []string
    AllowHeaders     []string
    AllowCredentials bool
    ExposeHeaders    []string
    SameSite         string
    Secure           bool
}

var cfg *ini.File

// Init loads and parses the config file.
func Init(l logging.Logger, path string) error {
    var err error

    cfg, err = ini.Load(path)
    if err != nil {
        l.Error("Failed to parse config file %q: %s", path, err)
        return err
    }

    sections := map[string]interface{}{
        "Database":   DatabaseConfig,
        "System":     SystemConfig,
        "SSL":        SSLConfig,
        "UnixSocket": UnixConfig,
        "Redis":      RedisConfig,
        "CORS":       CORSConfig,
        "Slave":      SlaveConfig,
    }
    for sectionName, sectionStruct := range sections {
        err = mapSection(sectionName, sectionStruct)
        if err != nil {
            l.Error("Failed to parse config section %q: %s", sectionName, err)
            return err
        }
    }

    // Map overrides for database-stored options
    for _, key := range cfg.Section("OptionOverwrite").Keys() {
        OptionOverwrite[key.Name()] = key.Value()
    }

    return nil
}

// mapSection maps a section of the config file onto a struct.
func mapSection(section string, confStruct interface{}) error {
    err := cfg.Section(section).MapTo(confStruct)
    if err != nil {
        return err
    }

    // Validate the mapped values
    validate := validator.New()
    err = validate.Struct(confStruct)
    if err != nil {
        return err
    }

    return nil
}

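mapSection combines go-ini's struct mapping with go-playground/validator's tag-based checks, so a malformed section fails fast at startup rather than surfacing later as a bad runtime value. A self-contained sketch of the same two-step flow on a toy [System] section (the struct here mirrors a subset of the one above):

package main

import (
    "fmt"

    "github.com/go-ini/ini"
    "github.com/go-playground/validator/v10"
)

type system struct {
    Mode   string `validate:"eq=master|eq=slave"`
    Listen string `validate:"required"`
    Debug  bool
}

func main() {
    // A minimal INI document with a [System] section, standing in for
    // the real config file.
    src := []byte("[System]\nMode = master\nListen = :5212\nDebug = false\n")

    cfg, err := ini.Load(src)
    if err != nil {
        panic(err)
    }

    // Step 1: map the section onto the struct.
    s := &system{}
    if err := cfg.Section("System").MapTo(s); err != nil {
        panic(err)
    }

    // Step 2: enforce the validate tags (Mode must be master|slave, etc.).
    if err := validator.New().Struct(s); err != nil {
        panic(err)
    }

    fmt.Printf("%+v\n", s)
}
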
application/migrator/conf/defaults.go (new file, 55 lines)
@@ -0,0 +1,55 @@
package conf

// RedisConfig is the default Redis server config.
var RedisConfig = &redis{
    Network:  "tcp",
    Server:   "",
    Password: "",
    DB:       "0",
}

// DatabaseConfig is the default database config.
var DatabaseConfig = &database{
    Type:       "UNSET",
    Charset:    "utf8",
    DBFile:     "cloudreve.db",
    Port:       3306,
    UnixSocket: false,
}

// SystemConfig is the default general system config.
var SystemConfig = &system{
    Debug:       false,
    Mode:        "master",
    Listen:      ":5212",
    ProxyHeader: "",
}

// CORSConfig is the default cross-origin config.
var CORSConfig = &cors{
    AllowOrigins:     []string{"UNSET"},
    AllowMethods:     []string{"PUT", "POST", "GET", "OPTIONS"},
    AllowHeaders:     []string{"Cookie", "X-Cr-Policy", "Authorization", "Content-Length", "Content-Type", "X-Cr-Path", "X-Cr-FileName"},
    AllowCredentials: false,
    ExposeHeaders:    nil,
    SameSite:         "Default",
    Secure:           false,
}

// SlaveConfig is the default slave node config.
var SlaveConfig = &slave{
    CallbackTimeout: 20,
    SignatureTTL:    60,
}

var SSLConfig = &ssl{
    Listen:   ":443",
    CertPath: "",
    KeyPath:  "",
}

var UnixConfig = &unix{
    Listen: "",
}

var OptionOverwrite = map[string]interface{}{}

application/migrator/directlink.go (new file, 82 lines)
@@ -0,0 +1,82 @@
package migrator

import (
    "context"
    "fmt"

    "github.com/cloudreve/Cloudreve/v4/application/migrator/model"
    "github.com/cloudreve/Cloudreve/v4/ent/file"
    "github.com/cloudreve/Cloudreve/v4/pkg/conf"
)

func (m *Migrator) migrateDirectLink() error {
    m.l.Info("Migrating direct links...")
    batchSize := 1000
    offset := m.state.DirectLinkOffset
    ctx := context.Background()

    if m.state.DirectLinkOffset > 0 {
        m.l.Info("Resuming direct link migration from offset %d", offset)
    }

    for {
        m.l.Info("Migrating direct links with offset %d", offset)
        var directLinks []model.SourceLink
        if err := model.DB.Limit(batchSize).Offset(offset).Find(&directLinks).Error; err != nil {
            return fmt.Errorf("failed to list v3 direct links: %w", err)
        }

        if len(directLinks) == 0 {
            if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
                m.l.Info("Resetting direct link ID sequence for postgres...")
                m.v4client.DirectLink.ExecContext(ctx, "SELECT SETVAL('direct_links_id_seq', (SELECT MAX(id) FROM direct_links))")
            }
            break
        }

        tx, err := m.v4client.Tx(ctx)
        if err != nil {
            // tx is nil when Tx fails, so there is nothing to roll back.
            return fmt.Errorf("failed to start transaction: %w", err)
        }

        for _, dl := range directLinks {
            sourceId := int(dl.FileID) + m.state.LastFolderID
            // Check that the referenced file exists
            _, err = tx.File.Query().Where(file.ID(sourceId)).First(ctx)
            if err != nil {
                m.l.Warning("File %d not found, skipping direct link %d", sourceId, dl.ID)
                continue
            }

            stm := tx.DirectLink.Create().
                SetCreatedAt(formatTime(dl.CreatedAt)).
                SetUpdatedAt(formatTime(dl.UpdatedAt)).
                SetRawID(int(dl.ID)).
                SetFileID(sourceId).
                SetName(dl.Name).
                SetDownloads(dl.Downloads).
                SetSpeed(0)

            if _, err := stm.Save(ctx); err != nil {
                _ = tx.Rollback()
                return fmt.Errorf("failed to create direct link %d: %w", dl.ID, err)
            }
        }

        if err := tx.Commit(); err != nil {
            return fmt.Errorf("failed to commit transaction: %w", err)
        }

        offset += batchSize
        m.state.DirectLinkOffset = offset
        if err := m.saveState(); err != nil {
            m.l.Warning("Failed to save state after direct link batch: %s", err)
        } else {
            m.l.Info("Saved migration state after processing this batch")
        }
    }

    return nil
}

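This loop is the template every table migration in this package follows: page through the v3 rows by offset, write each page inside one v4 transaction, then checkpoint the offset into the migration state so an interrupted run can resume where it left off. Reduced to its control flow (fetchBatch and migrateOne are illustrative stand-ins, not repository functions):

// Generic shape of the migrator loops in this package.
for {
    batch, err := fetchBatch(offset, batchSize) // read one page of v3 rows
    if err != nil {
        return err
    }
    if len(batch) == 0 {
        break // done; postgres additionally gets its ID sequence reset
    }

    tx, err := v4client.Tx(ctx)
    if err != nil {
        return err
    }
    for _, row := range batch {
        if err := migrateOne(tx, row); err != nil {
            _ = tx.Rollback()
            return err
        }
    }
    if err := tx.Commit(); err != nil {
        return err
    }

    offset += batchSize
    state.Offset = offset // checkpoint: a crashed run resumes from here
    _ = saveState()
}
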
application/migrator/file.go (new file, 189 lines)
@@ -0,0 +1,189 @@
package migrator

import (
    "context"
    "encoding/json"
    "fmt"
    "os"
    "strconv"

    "github.com/cloudreve/Cloudreve/v4/application/migrator/model"
    "github.com/cloudreve/Cloudreve/v4/ent"
    "github.com/cloudreve/Cloudreve/v4/inventory/types"
    "github.com/cloudreve/Cloudreve/v4/pkg/conf"
)

func (m *Migrator) migrateFile() error {
    m.l.Info("Migrating files...")
    batchSize := 1000
    offset := m.state.FileOffset
    ctx := context.Background()

    if m.state.FileConflictRename == nil {
        m.state.FileConflictRename = make(map[uint]string)
    }

    if m.state.EntitySources == nil {
        m.state.EntitySources = make(map[string]int)
    }

    if offset > 0 {
        m.l.Info("Resuming file migration from offset %d", offset)
    }

out:
    for {
        m.l.Info("Migrating files with offset %d", offset)
        var files []model.File
        if err := model.DB.Limit(batchSize).Offset(offset).Find(&files).Error; err != nil {
            return fmt.Errorf("failed to list v3 files: %w", err)
        }

        if len(files) == 0 {
            if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
                m.l.Info("Resetting file ID sequence for postgres...")
                m.v4client.File.ExecContext(ctx, "SELECT SETVAL('files_id_seq', (SELECT MAX(id) FROM files))")
            }
            break
        }

        tx, err := m.v4client.Tx(ctx)
        if err != nil {
            // tx is nil when Tx fails, so there is nothing to roll back.
            return fmt.Errorf("failed to start transaction: %w", err)
        }

        for _, f := range files {
            if _, ok := m.state.FolderIDs[int(f.FolderID)]; !ok {
                m.l.Warning("Folder ID %d for file %d not found, skipping", f.FolderID, f.ID)
                continue
            }

            if _, ok := m.state.UserIDs[int(f.UserID)]; !ok {
                m.l.Warning("User ID %d for file %d not found, skipping", f.UserID, f.ID)
                continue
            }

            if _, ok := m.state.PolicyIDs[int(f.PolicyID)]; !ok {
                m.l.Warning("Policy ID %d for file %d not found, skipping", f.PolicyID, f.ID)
                continue
            }

            metadata := make(map[string]string)
            if f.Metadata != "" {
                // Best-effort decode; a malformed blob just leaves the map empty.
                _ = json.Unmarshal([]byte(f.Metadata), &metadata)
            }

            var (
                thumbnail *ent.Entity
                entity    *ent.Entity
                err       error
            )

            if metadata[model.ThumbStatusMetadataKey] == model.ThumbStatusExist {
                size := int64(0)
                if m.state.LocalPolicyIDs[int(f.PolicyID)] {
                    thumbFile, err := os.Stat(f.SourceName + m.state.ThumbSuffix)
                    if err == nil {
                        size = thumbFile.Size()
                    } else {
                        m.l.Warning("Thumbnail file %s for file %d not found, using 0 size", f.SourceName+m.state.ThumbSuffix, f.ID)
                    }
                }
                // Insert thumbnail entity
                thumbnail, err = m.insertEntity(tx, f.SourceName+m.state.ThumbSuffix, int(types.EntityTypeThumbnail), int(f.PolicyID), int(f.UserID), size)
                if err != nil {
                    _ = tx.Rollback()
                    return fmt.Errorf("failed to insert thumbnail entity: %w", err)
                }
            }

            // Insert file version entity
            entity, err = m.insertEntity(tx, f.SourceName, int(types.EntityTypeVersion), int(f.PolicyID), int(f.UserID), int64(f.Size))
            if err != nil {
                _ = tx.Rollback()
                return fmt.Errorf("failed to insert file version entity: %w", err)
            }

            fname := f.Name
            if _, ok := m.state.FileConflictRename[f.ID]; ok {
                fname = m.state.FileConflictRename[f.ID]
            }

            stm := tx.File.Create().
                SetCreatedAt(formatTime(f.CreatedAt)).
                SetUpdatedAt(formatTime(f.UpdatedAt)).
                SetName(fname).
                SetRawID(int(f.ID) + m.state.LastFolderID).
                SetOwnerID(int(f.UserID)).
                SetSize(int64(f.Size)).
                SetPrimaryEntity(entity.ID).
                SetFileChildren(int(f.FolderID)).
                SetType(int(types.FileTypeFile)).
                SetStoragePoliciesID(int(f.PolicyID)).
                AddEntities(entity)

            if thumbnail != nil {
                stm.AddEntities(thumbnail)
            }

            if _, err := stm.Save(ctx); err != nil {
                _ = tx.Rollback()
                if ent.IsConstraintError(err) {
                    if _, ok := m.state.FileConflictRename[f.ID]; ok {
                        return fmt.Errorf("file %d already exists, but new name is already in conflict rename map, please resolve this manually", f.ID)
                    }

                    m.l.Warning("File %d already exists, will retry with new name in next batch", f.ID)
                    m.state.FileConflictRename[f.ID] = fmt.Sprintf("%d_%s", f.ID, f.Name)
                    // Retry the whole batch: the offset has not advanced, so the
                    // renamed file is picked up on the next outer iteration.
                    continue out
                }
                return fmt.Errorf("failed to create file %d: %w", f.ID, err)
            }
        }

        if err := tx.Commit(); err != nil {
            return fmt.Errorf("failed to commit transaction: %w", err)
        }

        offset += batchSize
        m.state.FileOffset = offset
        if err := m.saveState(); err != nil {
            m.l.Warning("Failed to save state after file batch: %s", err)
        } else {
            m.l.Info("Saved migration state after processing this batch")
        }
    }

    return nil
}

func (m *Migrator) insertEntity(tx *ent.Tx, source string, entityType, policyID, createdBy int, size int64) (*ent.Entity, error) {
    // Look for an existing entity for the same physical source first
    entityKey := strconv.Itoa(policyID) + "+" + source
    if existingId, ok := m.state.EntitySources[entityKey]; ok {
        existing, err := tx.Entity.UpdateOneID(existingId).
            AddReferenceCount(1).
            Save(context.Background())
        if err == nil {
            return existing, nil
        }
        m.l.Warning("Failed to update existing entity %d: %s, fallback to create new one.", existingId, err)
    }

    // Create a new entity
    e, err := tx.Entity.Create().
        SetSource(source).
        SetType(entityType).
        SetSize(size).
        SetStoragePolicyEntities(policyID).
        SetCreatedBy(createdBy).
        SetReferenceCount(1).
        Save(context.Background())
    if err != nil {
        return nil, fmt.Errorf("failed to create new entity: %w", err)
    }

    m.state.EntitySources[entityKey] = e.ID
    return e, nil
}

147
application/migrator/folders.go
Normal file
147
application/migrator/folders.go
Normal file
@@ -0,0 +1,147 @@
|
||||
package migrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
)
|
||||
|
||||
func (m *Migrator) migrateFolders() error {
|
||||
m.l.Info("Migrating folders...")
|
||||
batchSize := 1000
|
||||
// Start from the saved offset if available
|
||||
offset := m.state.FolderOffset
|
||||
ctx := context.Background()
|
||||
foldersCount := 0
|
||||
|
||||
if m.state.FolderIDs == nil {
|
||||
m.state.FolderIDs = make(map[int]bool)
|
||||
}
|
||||
|
||||
if offset > 0 {
|
||||
m.l.Info("Resuming folder migration from offset %d", offset)
|
||||
}
|
||||
|
||||
for {
|
||||
m.l.Info("Migrating folders with offset %d", offset)
|
||||
var folders []model.Folder
|
||||
if err := model.DB.Limit(batchSize).Offset(offset).Find(&folders).Error; err != nil {
|
||||
return fmt.Errorf("failed to list v3 folders: %w", err)
|
||||
}
|
||||
|
||||
if len(folders) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
tx, err := m.v4client.Tx(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start transaction: %w", err)
|
||||
}
|
||||
|
||||
batchFoldersCount := 0
|
||||
for _, f := range folders {
|
||||
if _, ok := m.state.UserIDs[int(f.OwnerID)]; !ok {
|
||||
m.l.Warning("Owner ID %d not found, skipping folder %d", f.OwnerID, f.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
isRoot := f.ParentID == nil
|
||||
if isRoot {
|
||||
f.Name = ""
|
||||
} else if *f.ParentID == 0 {
|
||||
m.l.Warning("Parent ID %d not found, skipping folder %d", *f.ParentID, f.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
stm := tx.File.Create().
|
||||
SetRawID(int(f.ID)).
|
||||
SetType(int(types.FileTypeFolder)).
|
||||
SetCreatedAt(formatTime(f.CreatedAt)).
|
||||
SetUpdatedAt(formatTime(f.UpdatedAt)).
|
||||
SetName(f.Name).
|
||||
SetOwnerID(int(f.OwnerID))
|
||||
|
||||
if _, err := stm.Save(ctx); err != nil {
|
||||
_ = tx.Rollback()
|
||||
return fmt.Errorf("failed to create folder %d: %w", f.ID, err)
|
||||
}
|
||||
|
||||
m.state.FolderIDs[int(f.ID)] = true
|
||||
m.state.LastFolderID = int(f.ID)
|
||||
|
||||
foldersCount++
|
||||
batchFoldersCount++
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
// Update the offset in state and save after each batch
|
||||
offset += batchSize
|
||||
m.state.FolderOffset = offset
|
||||
if err := m.saveState(); err != nil {
|
||||
m.l.Warning("Failed to save state after folder batch: %s", err)
|
||||
} else {
|
||||
m.l.Info("Saved migration state after processing %d folders in this batch", batchFoldersCount)
|
||||
}
|
||||
}
|
||||
|
||||
m.l.Info("Successfully migrated %d folders", foldersCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
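// migrateFolderParent is a second pass that wires up parent links; it runs
// after migrateFolders so every referenced parent row already exists.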
func (m *Migrator) migrateFolderParent() error {
|
||||
m.l.Info("Migrating folder parent...")
|
||||
batchSize := 1000
|
||||
offset := m.state.FolderParentOffset
|
||||
ctx := context.Background()
|
||||
|
||||
for {
|
||||
m.l.Info("Migrating folder parent with offset %d", offset)
|
||||
var folderParents []model.Folder
|
||||
if err := model.DB.Limit(batchSize).Offset(offset).Find(&folderParents).Error; err != nil {
|
||||
return fmt.Errorf("failed to list v3 folder parents: %w", err)
|
||||
}
|
||||
|
||||
if len(folderParents) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
tx, err := m.v4client.Tx(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start transaction: %w", err)
|
||||
}
|
||||
|
||||
for _, f := range folderParents {
|
||||
if f.ParentID != nil {
|
||||
if _, ok := m.state.FolderIDs[int(*f.ParentID)]; !ok {
|
||||
m.l.Warning("Folder ID %d not found, skipping folder parent %d", f.ID, f.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := tx.File.UpdateOneID(int(f.ID)).SetParentID(int(*f.ParentID)).Save(ctx); err != nil {
|
||||
_ = tx.Rollback()
|
||||
return fmt.Errorf("failed to update folder parent %d: %w", f.ID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
// Update the offset in state and save after each batch
|
||||
offset += batchSize
|
||||
m.state.FolderParentOffset = offset
|
||||
if err := m.saveState(); err != nil {
|
||||
m.l.Warning("Failed to save state after folder parent batch: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
application/migrator/group.go (new file, 92 lines)
@@ -0,0 +1,92 @@
|
||||
package migrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
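// migrateGroup converts v3 groups: JSON options are decoded, boolean flags are
// packed into a permission boolset, and only the first still-existing storage
// policy is kept, since a v4 group references a single policy.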
func (m *Migrator) migrateGroup() error {
|
||||
m.l.Info("Migrating groups...")
|
||||
|
||||
var groups []model.Group
|
||||
if err := model.DB.Find(&groups).Error; err != nil {
|
||||
return fmt.Errorf("failed to list v3 groups: %w", err)
|
||||
}
|
||||
|
||||
for _, group := range groups {
|
||||
cap := &boolset.BooleanSet{}
|
||||
var (
|
||||
opts model.GroupOption
|
||||
policies []int
|
||||
)
|
||||
if err := json.Unmarshal([]byte(group.Options), &opts); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal options for group %q: %w", group.Name, err)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal([]byte(group.Policies), &policies); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal policies for group %q: %w", group.Name, err)
|
||||
}
|
||||
|
||||
policies = lo.Filter(policies, func(id int, _ int) bool {
|
||||
_, exist := m.state.PolicyIDs[id]
|
||||
return exist
|
||||
})
|
||||
|
||||
newOpts := &types.GroupSetting{
|
||||
CompressSize: int64(opts.CompressSize),
|
||||
DecompressSize: int64(opts.DecompressSize),
|
||||
RemoteDownloadOptions: opts.Aria2Options,
|
||||
SourceBatchSize: opts.SourceBatchSize,
|
||||
RedirectedSource: opts.RedirectedSource,
|
||||
Aria2BatchSize: opts.Aria2BatchSize,
|
||||
MaxWalkedFiles: 100000,
|
||||
TrashRetention: 7 * 24 * 3600,
|
||||
}
|
||||
|
||||
boolset.Sets(map[types.GroupPermission]bool{
|
||||
types.GroupPermissionIsAdmin: group.ID == 1,
|
||||
types.GroupPermissionIsAnonymous: group.ID == 3,
|
||||
types.GroupPermissionShareDownload: opts.ShareDownload,
|
||||
types.GroupPermissionWebDAV: group.WebDAVEnabled,
|
||||
types.GroupPermissionArchiveDownload: opts.ArchiveDownload,
|
||||
types.GroupPermissionArchiveTask: opts.ArchiveTask,
|
||||
types.GroupPermissionWebDAVProxy: opts.WebDAVProxy,
|
||||
types.GroupPermissionRemoteDownload: opts.Aria2,
|
||||
types.GroupPermissionAdvanceDelete: opts.AdvanceDelete,
|
||||
types.GroupPermissionShare: group.ShareEnabled,
|
||||
types.GroupPermissionRedirectedSource: opts.RedirectedSource,
|
||||
}, cap)
|
||||
|
||||
stm := m.v4client.Group.Create().
|
||||
SetRawID(int(group.ID)).
|
||||
SetCreatedAt(formatTime(group.CreatedAt)).
|
||||
SetUpdatedAt(formatTime(group.UpdatedAt)).
|
||||
SetName(group.Name).
|
||||
SetMaxStorage(int64(group.MaxStorage)).
|
||||
SetSpeedLimit(group.SpeedLimit).
|
||||
SetPermissions(cap).
|
||||
SetSettings(newOpts)
|
||||
|
||||
if len(policies) > 0 {
|
||||
stm.SetStoragePoliciesID(policies[0])
|
||||
}
|
||||
|
||||
if _, err := stm.Save(context.Background()); err != nil {
|
||||
return fmt.Errorf("failed to create group %q: %w", group.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
|
||||
m.l.Info("Resetting group ID sequence for postgres...")
|
||||
m.v4client.Group.ExecContext(context.Background(), "SELECT SETVAL('groups_id_seq', (SELECT MAX(id) FROM groups))")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
application/migrator/migrator.go (new file, 314 lines)
@@ -0,0 +1,314 @@
|
||||
package migrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
)
|
||||
|
||||
// State stores the migration progress
|
||||
type State struct {
|
||||
PolicyIDs map[int]bool `json:"policy_ids,omitempty"`
|
||||
LocalPolicyIDs map[int]bool `json:"local_policy_ids,omitempty"`
|
||||
UserIDs map[int]bool `json:"user_ids,omitempty"`
|
||||
FolderIDs map[int]bool `json:"folder_ids,omitempty"`
|
||||
EntitySources map[string]int `json:"entity_sources,omitempty"`
|
||||
LastFolderID int `json:"last_folder_id,omitempty"`
|
||||
Step int `json:"step,omitempty"`
|
||||
UserOffset int `json:"user_offset,omitempty"`
|
||||
FolderOffset int `json:"folder_offset,omitempty"`
|
||||
FileOffset int `json:"file_offset,omitempty"`
|
||||
ShareOffset int `json:"share_offset,omitempty"`
|
||||
GiftCodeOffset int `json:"gift_code_offset,omitempty"`
|
||||
DirectLinkOffset int `json:"direct_link_offset,omitempty"`
|
||||
WebdavOffset int `json:"webdav_offset,omitempty"`
|
||||
StoragePackOffset int `json:"storage_pack_offset,omitempty"`
|
||||
FileConflictRename map[uint]string `json:"file_conflict_rename,omitempty"`
|
||||
FolderParentOffset int `json:"folder_parent_offset,omitempty"`
|
||||
ThumbSuffix string `json:"thumb_suffix,omitempty"`
|
||||
V3AvatarPath string `json:"v3_avatar_path,omitempty"`
|
||||
}
|
||||
|
||||
// Step identifiers for migration phases
|
||||
const (
|
||||
StepInitial = 0
|
||||
StepSchema = 1
|
||||
StepSettings = 2
|
||||
StepNode = 3
|
||||
StepPolicy = 4
|
||||
StepGroup = 5
|
||||
StepUser = 6
|
||||
StepFolders = 7
|
||||
StepFolderParent = 8
|
||||
StepFile = 9
|
||||
StepShare = 10
|
||||
StepDirectLink = 11
|
||||
Step_CommunityPlaceholder1 = 12
|
||||
Step_CommunityPlaceholder2 = 13
|
||||
StepAvatar = 14
|
||||
StepWebdav = 15
|
||||
StepCompleted = 16
|
||||
StateFileName = "migration_state.json"
|
||||
)
|
||||
|
||||
type Migrator struct {
|
||||
dep dependency.Dep
|
||||
l logging.Logger
|
||||
v4client *ent.Client
|
||||
state *State
|
||||
statePath string
|
||||
}
|
||||
|
||||
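// NewMigrator opens the v3 (gorm) source and v4 (ent) target connections and
// reloads any existing migration_state.json so a previous run can be resumed.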
func NewMigrator(dep dependency.Dep, v3ConfPath string) (*Migrator, error) {
|
||||
m := &Migrator{
|
||||
dep: dep,
|
||||
l: dep.Logger(),
|
||||
state: &State{
|
||||
PolicyIDs: make(map[int]bool),
|
||||
UserIDs: make(map[int]bool),
|
||||
Step: StepInitial,
|
||||
UserOffset: 0,
|
||||
FolderOffset: 0,
|
||||
},
|
||||
}
|
||||
|
||||
// Determine state file path
|
||||
configDir := filepath.Dir(v3ConfPath)
|
||||
m.statePath = filepath.Join(configDir, StateFileName)
|
||||
|
||||
// Try to load existing state
|
||||
if util.Exists(m.statePath) {
|
||||
m.l.Info("Found existing migration state file, loading from %s", m.statePath)
|
||||
if err := m.loadState(); err != nil {
|
||||
return nil, fmt.Errorf("failed to load migration state: %w", err)
|
||||
}
|
||||
|
||||
stepName := "unknown"
|
||||
switch m.state.Step {
|
||||
case StepInitial:
|
||||
stepName = "initial"
|
||||
case StepSchema:
|
||||
stepName = "schema creation"
|
||||
case StepSettings:
|
||||
stepName = "settings migration"
|
||||
case StepNode:
|
||||
stepName = "node migration"
|
||||
case StepPolicy:
|
||||
stepName = "policy migration"
|
||||
case StepGroup:
|
||||
stepName = "group migration"
|
||||
case StepUser:
|
||||
stepName = "user migration"
|
||||
case StepFolders:
|
||||
stepName = "folders migration"
|
||||
case StepCompleted:
|
||||
stepName = "completed"
|
||||
case StepWebdav:
|
||||
stepName = "webdav migration"
|
||||
case StepAvatar:
|
||||
stepName = "avatar migration"
|
||||
|
||||
}
|
||||
|
||||
m.l.Info("Resumed migration from step %d (%s)", m.state.Step, stepName)
|
||||
|
||||
// Log batch information if applicable
|
||||
if m.state.Step == StepUser && m.state.UserOffset > 0 {
|
||||
m.l.Info("Will resume user migration from batch offset %d", m.state.UserOffset)
|
||||
}
|
||||
if m.state.Step == StepFolders && m.state.FolderOffset > 0 {
|
||||
m.l.Info("Will resume folder migration from batch offset %d", m.state.FolderOffset)
|
||||
}
|
||||
}
|
||||
|
||||
err := conf.Init(m.dep.Logger(), v3ConfPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = model.Init()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v4client, err := inventory.NewRawEntClient(m.l, m.dep.ConfigProvider())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.v4client = v4client
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// saveState persists migration state to file
|
||||
func (m *Migrator) saveState() error {
|
||||
data, err := json.Marshal(m.state)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal state: %w", err)
|
||||
}
|
||||
|
||||
return os.WriteFile(m.statePath, data, 0644)
|
||||
}
|
||||
|
||||
// loadState reads migration state from file
|
||||
func (m *Migrator) loadState() error {
|
||||
data, err := os.ReadFile(m.statePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read state file: %w", err)
|
||||
}
|
||||
|
||||
return json.Unmarshal(data, m.state)
|
||||
}
|
||||
|
||||
// updateStep updates current step and persists state
|
||||
func (m *Migrator) updateStep(step int) error {
|
||||
m.state.Step = step
|
||||
return m.saveState()
|
||||
}
|
||||
|
||||
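// Migrate runs each phase in order; the persisted step number lets a restarted
// process skip phases that have already completed.
//
// A minimal usage sketch (the surrounding wiring is an assumption; only
// NewMigrator and Migrate are defined here):
//
//	m, err := NewMigrator(dep, "/path/to/v3/conf.ini")
//	if err != nil {
//		return err
//	}
//	return m.Migrate()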
func (m *Migrator) Migrate() error {
|
||||
// Continue from the current step
|
||||
if m.state.Step <= StepSchema {
|
||||
m.l.Info("Creating basic v4 table schema...")
|
||||
if err := m.v4client.Schema.Create(context.Background()); err != nil {
|
||||
return fmt.Errorf("failed creating schema resources: %w", err)
|
||||
}
|
||||
if err := m.updateStep(StepSettings); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepSettings {
|
||||
if err := m.migrateSettings(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.updateStep(StepNode); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepNode {
|
||||
if err := m.migrateNode(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.updateStep(StepPolicy); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepPolicy {
|
||||
allPolicyIDs, err := m.migratePolicy()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.state.PolicyIDs = allPolicyIDs
|
||||
if err := m.updateStep(StepGroup); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepGroup {
|
||||
if err := m.migrateGroup(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.updateStep(StepUser); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepUser {
|
||||
if err := m.migrateUser(); err != nil {
|
||||
m.saveState()
|
||||
return err
|
||||
}
|
||||
// Reset user offset after completion
|
||||
m.state.UserOffset = 0
|
||||
if err := m.updateStep(StepFolders); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepFolders {
|
||||
if err := m.migrateFolders(); err != nil {
|
||||
m.saveState()
|
||||
return err
|
||||
}
|
||||
// Reset folder offset after completion
|
||||
m.state.FolderOffset = 0
|
||||
if err := m.updateStep(StepFolderParent); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepFolderParent {
|
||||
if err := m.migrateFolderParent(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.updateStep(StepFile); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepFile {
|
||||
if err := m.migrateFile(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.updateStep(StepShare); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepShare {
|
||||
if err := m.migrateShare(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.updateStep(StepDirectLink); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepDirectLink {
|
||||
if err := m.migrateDirectLink(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.updateStep(StepAvatar); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepAvatar {
|
||||
if err := migrateAvatars(m); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.updateStep(StepWebdav); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Step <= StepWebdav {
|
||||
if err := m.migrateWebdav(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.updateStep(StepCompleted); err != nil {
|
||||
return fmt.Errorf("failed to update step: %w", err)
|
||||
}
|
||||
}
|
||||
m.l.Info("Migration completed successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
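// formatTime truncates a timestamp to millisecond precision.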
func formatTime(t time.Time) time.Time {
|
||||
newTime := time.UnixMilli(t.UnixMilli())
|
||||
return newTime
|
||||
}
|
||||
application/migrator/model/dialects/dialect_sqlite.go (new file, 288 lines)
@@ -0,0 +1,288 @@
|
||||
package dialects
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
var keyNameRegex = regexp.MustCompile("[^a-zA-Z0-9]+")
|
||||
|
||||
// DefaultForeignKeyNamer contains the default foreign key name generator method
|
||||
type DefaultForeignKeyNamer struct {
|
||||
}
|
||||
|
||||
type commonDialect struct {
|
||||
db gorm.SQLCommon
|
||||
DefaultForeignKeyNamer
|
||||
}
|
||||
|
||||
func (commonDialect) GetName() string {
|
||||
return "common"
|
||||
}
|
||||
|
||||
func (s *commonDialect) SetDB(db gorm.SQLCommon) {
|
||||
s.db = db
|
||||
}
|
||||
|
||||
func (commonDialect) BindVar(i int) string {
|
||||
return "$$$" // ?
|
||||
}
|
||||
|
||||
func (commonDialect) Quote(key string) string {
|
||||
return fmt.Sprintf(`"%s"`, key)
|
||||
}
|
||||
|
||||
func (s *commonDialect) fieldCanAutoIncrement(field *gorm.StructField) bool {
|
||||
if value, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok {
|
||||
return strings.ToLower(value) != "false"
|
||||
}
|
||||
return field.IsPrimaryKey
|
||||
}
|
||||
|
||||
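// DataTypeOf maps a Go struct field to a generic SQL column type, honoring
// explicit type/size settings parsed from the gorm struct tags.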
func (s *commonDialect) DataTypeOf(field *gorm.StructField) string {
|
||||
var dataValue, sqlType, size, additionalType = gorm.ParseFieldStructForDialect(field, s)
|
||||
|
||||
if sqlType == "" {
|
||||
switch dataValue.Kind() {
|
||||
case reflect.Bool:
|
||||
sqlType = "BOOLEAN"
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
sqlType = "INTEGER AUTO_INCREMENT"
|
||||
} else {
|
||||
sqlType = "INTEGER"
|
||||
}
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
sqlType = "BIGINT AUTO_INCREMENT"
|
||||
} else {
|
||||
sqlType = "BIGINT"
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
sqlType = "FLOAT"
|
||||
case reflect.String:
|
||||
if size > 0 && size < 65532 {
|
||||
sqlType = fmt.Sprintf("VARCHAR(%d)", size)
|
||||
} else {
|
||||
sqlType = "VARCHAR(65532)"
|
||||
}
|
||||
case reflect.Struct:
|
||||
if _, ok := dataValue.Interface().(time.Time); ok {
|
||||
sqlType = "TIMESTAMP"
|
||||
}
|
||||
default:
|
||||
if _, ok := dataValue.Interface().([]byte); ok {
|
||||
if size > 0 && size < 65532 {
|
||||
sqlType = fmt.Sprintf("BINARY(%d)", size)
|
||||
} else {
|
||||
sqlType = "BINARY(65532)"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if sqlType == "" {
|
||||
panic(fmt.Sprintf("invalid sql type %s (%s) for commonDialect", dataValue.Type().Name(), dataValue.Kind().String()))
|
||||
}
|
||||
|
||||
if strings.TrimSpace(additionalType) == "" {
|
||||
return sqlType
|
||||
}
|
||||
return fmt.Sprintf("%v %v", sqlType, additionalType)
|
||||
}
|
||||
|
||||
func currentDatabaseAndTable(dialect gorm.Dialect, tableName string) (string, string) {
|
||||
if strings.Contains(tableName, ".") {
|
||||
splitStrings := strings.SplitN(tableName, ".", 2)
|
||||
return splitStrings[0], splitStrings[1]
|
||||
}
|
||||
return dialect.CurrentDatabase(), tableName
|
||||
}
|
||||
|
||||
func (s commonDialect) HasIndex(tableName string, indexName string) bool {
|
||||
var count int
|
||||
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
|
||||
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE table_schema = ? AND table_name = ? AND index_name = ?", currentDatabase, tableName, indexName).Scan(&count)
|
||||
return count > 0
|
||||
}
|
||||
|
||||
func (s commonDialect) RemoveIndex(tableName string, indexName string) error {
|
||||
_, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v", indexName))
|
||||
return err
|
||||
}
|
||||
|
||||
func (s commonDialect) HasForeignKey(tableName string, foreignKeyName string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (s commonDialect) HasTable(tableName string) bool {
|
||||
var count int
|
||||
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
|
||||
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ? AND table_name = ?", currentDatabase, tableName).Scan(&count)
|
||||
return count > 0
|
||||
}
|
||||
|
||||
func (s commonDialect) HasColumn(tableName string, columnName string) bool {
|
||||
var count int
|
||||
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName)
|
||||
s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ? AND column_name = ?", currentDatabase, tableName, columnName).Scan(&count)
|
||||
return count > 0
|
||||
}
|
||||
|
||||
func (s commonDialect) ModifyColumn(tableName string, columnName string, typ string) error {
|
||||
_, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v ALTER COLUMN %v TYPE %v", tableName, columnName, typ))
|
||||
return err
|
||||
}
|
||||
|
||||
func (s commonDialect) CurrentDatabase() (name string) {
|
||||
s.db.QueryRow("SELECT DATABASE()").Scan(&name)
|
||||
return
|
||||
}
|
||||
|
||||
func (commonDialect) LimitAndOffsetSQL(limit, offset interface{}) (sql string) {
|
||||
if limit != nil {
|
||||
if parsedLimit, err := strconv.ParseInt(fmt.Sprint(limit), 0, 0); err == nil && parsedLimit >= 0 {
|
||||
sql += fmt.Sprintf(" LIMIT %d", parsedLimit)
|
||||
}
|
||||
}
|
||||
if offset != nil {
|
||||
if parsedOffset, err := strconv.ParseInt(fmt.Sprint(offset), 0, 0); err == nil && parsedOffset >= 0 {
|
||||
sql += fmt.Sprintf(" OFFSET %d", parsedOffset)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (commonDialect) SelectFromDummyTable() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (commonDialect) LastInsertIDReturningSuffix(tableName, columnName string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (commonDialect) DefaultValueStr() string {
|
||||
return "DEFAULT VALUES"
|
||||
}
|
||||
|
||||
// BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference
|
||||
func (DefaultForeignKeyNamer) BuildKeyName(kind, tableName string, fields ...string) string {
|
||||
keyName := fmt.Sprintf("%s_%s_%s", kind, tableName, strings.Join(fields, "_"))
|
||||
keyName = keyNameRegex.ReplaceAllString(keyName, "_")
|
||||
return keyName
|
||||
}
|
||||
|
||||
// NormalizeIndexAndColumn returns argument's index name and column name without doing anything
|
||||
func (commonDialect) NormalizeIndexAndColumn(indexName, columnName string) (string, string) {
|
||||
return indexName, columnName
|
||||
}
|
||||
|
||||
// IsByteArrayOrSlice returns true of the reflected value is an array or slice
|
||||
func IsByteArrayOrSlice(value reflect.Value) bool {
|
||||
return (value.Kind() == reflect.Array || value.Kind() == reflect.Slice) && value.Type().Elem() == reflect.TypeOf(uint8(0))
|
||||
}
|
||||
|
||||
type sqlite struct {
|
||||
commonDialect
|
||||
}
|
||||
|
||||
func init() {
|
||||
gorm.RegisterDialect("sqlite", &sqlite{})
|
||||
}
|
||||
|
||||
func (sqlite) GetName() string {
|
||||
return "sqlite"
|
||||
}
|
||||
|
||||
// DataTypeOf returns the SQLite column type for a Go struct field
|
||||
func (s *sqlite) DataTypeOf(field *gorm.StructField) string {
|
||||
var dataValue, sqlType, size, additionalType = gorm.ParseFieldStructForDialect(field, s)
|
||||
|
||||
if sqlType == "" {
|
||||
switch dataValue.Kind() {
|
||||
case reflect.Bool:
|
||||
sqlType = "bool"
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
|
||||
sqlType = "integer primary key autoincrement"
|
||||
} else {
|
||||
sqlType = "integer"
|
||||
}
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
if s.fieldCanAutoIncrement(field) {
|
||||
field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT")
|
||||
sqlType = "integer primary key autoincrement"
|
||||
} else {
|
||||
sqlType = "bigint"
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
sqlType = "real"
|
||||
case reflect.String:
|
||||
if size > 0 && size < 65532 {
|
||||
sqlType = fmt.Sprintf("varchar(%d)", size)
|
||||
} else {
|
||||
sqlType = "text"
|
||||
}
|
||||
case reflect.Struct:
|
||||
if _, ok := dataValue.Interface().(time.Time); ok {
|
||||
sqlType = "datetime"
|
||||
}
|
||||
default:
|
||||
if IsByteArrayOrSlice(dataValue) {
|
||||
sqlType = "blob"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if sqlType == "" {
|
||||
panic(fmt.Sprintf("invalid sql type %s (%s) for sqlite", dataValue.Type().Name(), dataValue.Kind().String()))
|
||||
}
|
||||
|
||||
if strings.TrimSpace(additionalType) == "" {
|
||||
return sqlType
|
||||
}
|
||||
return fmt.Sprintf("%v %v", sqlType, additionalType)
|
||||
}
|
||||
|
||||
func (s sqlite) HasIndex(tableName string, indexName string) bool {
|
||||
var count int
|
||||
s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND sql LIKE '%%INDEX %v ON%%'", indexName), tableName).Scan(&count)
|
||||
return count > 0
|
||||
}
|
||||
|
||||
func (s sqlite) HasTable(tableName string) bool {
|
||||
var count int
|
||||
s.db.QueryRow("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", tableName).Scan(&count)
|
||||
return count > 0
|
||||
}
|
||||
|
||||
func (s sqlite) HasColumn(tableName string, columnName string) bool {
|
||||
var count int
|
||||
s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND (sql LIKE '%%\"%v\" %%' OR sql LIKE '%%%v %%');", columnName, columnName), tableName).Scan(&count)
|
||||
return count > 0
|
||||
}
|
||||
|
||||
func (s sqlite) CurrentDatabase() (name string) {
|
||||
var (
|
||||
ifaces = make([]interface{}, 3)
|
||||
pointers = make([]*string, 3)
|
||||
i int
|
||||
)
|
||||
for i = 0; i < 3; i++ {
|
||||
ifaces[i] = &pointers[i]
|
||||
}
|
||||
if err := s.db.QueryRow("PRAGMA database_list").Scan(ifaces...); err != nil {
|
||||
return
|
||||
}
|
||||
if pointers[1] != nil {
|
||||
name = *pointers[1]
|
||||
}
|
||||
return
|
||||
}
|
||||
application/migrator/model/file.go (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// File is the v3 file model
|
||||
type File struct {
|
||||
// Table fields
|
||||
gorm.Model
|
||||
Name string `gorm:"unique_index:idx_only_one"`
|
||||
SourceName string `gorm:"type:text"`
|
||||
UserID uint `gorm:"index:user_id;unique_index:idx_only_one"`
|
||||
Size uint64
|
||||
PicInfo string
|
||||
FolderID uint `gorm:"index:folder_id;unique_index:idx_only_one"`
|
||||
PolicyID uint
|
||||
UploadSessionID *string `gorm:"index:session_id;unique_index:session_only_one"`
|
||||
Metadata string `gorm:"type:text"`
|
||||
|
||||
// Associated models
|
||||
Policy Policy `gorm:"PRELOAD:false,association_autoupdate:false"`
|
||||
|
||||
// Fields ignored by the database
|
||||
Position string `gorm:"-"`
|
||||
MetadataSerialized map[string]string `gorm:"-"`
|
||||
}
|
||||
|
||||
// Thumb related metadata
|
||||
const (
|
||||
ThumbStatusNotExist = ""
|
||||
ThumbStatusExist = "exist"
|
||||
ThumbStatusNotAvailable = "not_available"
|
||||
|
||||
ThumbStatusMetadataKey = "thumb_status"
|
||||
ThumbSidecarMetadataKey = "thumb_sidecar"
|
||||
|
||||
ChecksumMetadataKey = "webdav_checksum"
|
||||
)
|
||||
application/migrator/model/folder.go (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// Folder is the v3 directory model
|
||||
type Folder struct {
|
||||
// Table fields
|
||||
gorm.Model
|
||||
Name string `gorm:"unique_index:idx_only_one_name"`
|
||||
ParentID *uint `gorm:"index:parent_id;unique_index:idx_only_one_name"`
|
||||
OwnerID uint `gorm:"index:owner_id"`
|
||||
|
||||
// Fields ignored by the database
|
||||
Position string `gorm:"-"`
|
||||
WebdavDstName string `gorm:"-"`
|
||||
}
|
||||
application/migrator/model/group.go (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// Group is the v3 user group model
|
||||
type Group struct {
|
||||
gorm.Model
|
||||
Name string
|
||||
Policies string
|
||||
MaxStorage uint64
|
||||
ShareEnabled bool
|
||||
WebDAVEnabled bool
|
||||
SpeedLimit int
|
||||
Options string `json:"-" gorm:"size:4294967295"`
|
||||
|
||||
// Fields ignored by the database
|
||||
PolicyList []uint `gorm:"-"`
|
||||
OptionsSerialized GroupOption `gorm:"-"`
|
||||
}
|
||||
|
||||
// GroupOption holds additional user group settings
|
||||
type GroupOption struct {
|
||||
ArchiveDownload bool `json:"archive_download,omitempty"` // batch download as archive
|
||||
ArchiveTask bool `json:"archive_task,omitempty"` // online compression
|
||||
CompressSize uint64 `json:"compress_size,omitempty"` // max compressible size
|
||||
DecompressSize uint64 `json:"decompress_size,omitempty"`
|
||||
OneTimeDownload bool `json:"one_time_download,omitempty"`
|
||||
ShareDownload bool `json:"share_download,omitempty"`
|
||||
Aria2 bool `json:"aria2,omitempty"` // offline download
|
||||
Aria2Options map[string]interface{} `json:"aria2_options,omitempty"` // per-group offline download options
|
||||
SourceBatchSize int `json:"source_batch,omitempty"`
|
||||
RedirectedSource bool `json:"redirected_source,omitempty"`
|
||||
Aria2BatchSize int `json:"aria2_batch,omitempty"`
|
||||
AdvanceDelete bool `json:"advance_delete,omitempty"`
|
||||
WebDAVProxy bool `json:"webdav_proxy,omitempty"`
|
||||
}
|
||||
application/migrator/model/init.go (new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/jinzhu/gorm"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
_ "github.com/jinzhu/gorm/dialects/mssql"
|
||||
_ "github.com/jinzhu/gorm/dialects/mysql"
|
||||
_ "github.com/jinzhu/gorm/dialects/postgres"
|
||||
)
|
||||
|
||||
// DB is the database connection singleton
|
||||
var DB *gorm.DB
|
||||
|
||||
// Init opens the v3 database connection (SQLite, MySQL/MSSQL, or Postgres)
|
||||
func Init() error {
|
||||
var (
|
||||
db *gorm.DB
|
||||
err error
|
||||
confDBType string = conf.DatabaseConfig.Type
|
||||
)
|
||||
|
||||
// Accept the "sqlite3" value from existing configs
|
||||
if confDBType == "sqlite3" {
|
||||
confDBType = "sqlite"
|
||||
}
|
||||
|
||||
// 兼容 "mariadb" 数据库
|
||||
if confDBType == "mariadb" {
|
||||
confDBType = "mysql"
|
||||
}
|
||||
|
||||
switch confDBType {
|
||||
case "UNSET", "sqlite":
|
||||
// Use SQLite when no database type is specified or "sqlite" is given explicitly
|
||||
db, err = gorm.Open("sqlite3", util.RelativePath(conf.DatabaseConfig.DBFile))
|
||||
case "postgres":
|
||||
db, err = gorm.Open(confDBType, fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=disable",
|
||||
conf.DatabaseConfig.Host,
|
||||
conf.DatabaseConfig.User,
|
||||
conf.DatabaseConfig.Password,
|
||||
conf.DatabaseConfig.Name,
|
||||
conf.DatabaseConfig.Port))
|
||||
case "mysql", "mssql":
|
||||
var host string
|
||||
if conf.DatabaseConfig.UnixSocket {
|
||||
host = fmt.Sprintf("unix(%s)",
|
||||
conf.DatabaseConfig.Host)
|
||||
} else {
|
||||
host = fmt.Sprintf("(%s:%d)",
|
||||
conf.DatabaseConfig.Host,
|
||||
conf.DatabaseConfig.Port)
|
||||
}
|
||||
|
||||
db, err = gorm.Open(confDBType, fmt.Sprintf("%s:%s@%s/%s?charset=%s&parseTime=True&loc=Local",
|
||||
conf.DatabaseConfig.User,
|
||||
conf.DatabaseConfig.Password,
|
||||
host,
|
||||
conf.DatabaseConfig.Name,
|
||||
conf.DatabaseConfig.Charset))
|
||||
default:
|
||||
return fmt.Errorf("unsupported database type %q", confDBType)
|
||||
}
|
||||
|
||||
//db.SetLogger(util.Log())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to database: %w", err)
|
||||
}
|
||||
|
||||
// Apply the table name prefix
|
||||
gorm.DefaultTableNameHandler = func(db *gorm.DB, defaultTableName string) string {
|
||||
return conf.DatabaseConfig.TablePrefix + defaultTableName
|
||||
}
|
||||
|
||||
// Log all SQL statements (debug mode)
|
||||
db.LogMode(true)
|
||||
|
||||
// Configure the connection pool
|
||||
db.DB().SetMaxIdleConns(50)
|
||||
if confDBType == "sqlite" || confDBType == "UNSET" {
|
||||
db.DB().SetMaxOpenConns(1)
|
||||
} else {
|
||||
db.DB().SetMaxOpenConns(100)
|
||||
}
|
||||
|
||||
// Connection lifetime
|
||||
db.DB().SetConnMaxLifetime(time.Second * 30)
|
||||
|
||||
DB = db
|
||||
|
||||
return nil
|
||||
}
|
||||
application/migrator/model/node.go (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// Node is the v3 slave node model
|
||||
type Node struct {
|
||||
gorm.Model
|
||||
Status NodeStatus // node status
|
||||
Name string // node alias
|
||||
Type ModelType // node type
|
||||
Server string // server address
|
||||
SlaveKey string `gorm:"type:text"` // master->slave communication key
|
||||
MasterKey string `gorm:"type:text"` // slave->master communication key
|
||||
Aria2Enabled bool // whether the node can serve offline downloads
|
||||
Aria2Options string `gorm:"type:text"` // offline download settings
|
||||
Rank int // load-balancing weight
|
||||
|
||||
// Fields ignored by the database
|
||||
Aria2OptionsSerialized Aria2Option `gorm:"-"`
|
||||
}
|
||||
|
||||
// Aria2Option holds non-public aria2 settings
|
||||
type Aria2Option struct {
|
||||
// RPC server address
|
||||
Server string `json:"server,omitempty"`
|
||||
// RPC token
|
||||
Token string `json:"token,omitempty"`
|
||||
// Temporary download directory
|
||||
TempPath string `json:"temp_path,omitempty"`
|
||||
// Extra download options
|
||||
Options string `json:"options,omitempty"`
|
||||
// Download monitoring interval
|
||||
Interval int `json:"interval,omitempty"`
|
||||
// RPC API request timeout
|
||||
Timeout int `json:"timeout,omitempty"`
|
||||
}
|
||||
|
||||
type NodeStatus int
|
||||
type ModelType int
|
||||
|
||||
const (
|
||||
NodeActive NodeStatus = iota
|
||||
NodeSuspend
|
||||
)
|
||||
|
||||
const (
|
||||
SlaveNodeType ModelType = iota
|
||||
MasterNodeType
|
||||
)
|
||||
application/migrator/model/policy.go (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// Policy is the v3 storage policy model
|
||||
type Policy struct {
|
||||
// Table fields
|
||||
gorm.Model
|
||||
Name string
|
||||
Type string
|
||||
Server string
|
||||
BucketName string
|
||||
IsPrivate bool
|
||||
BaseURL string
|
||||
AccessKey string `gorm:"type:text"`
|
||||
SecretKey string `gorm:"type:text"`
|
||||
MaxSize uint64
|
||||
AutoRename bool
|
||||
DirNameRule string
|
||||
FileNameRule string
|
||||
IsOriginLinkEnable bool
|
||||
Options string `gorm:"type:text"`
|
||||
|
||||
// Fields ignored by the database
|
||||
OptionsSerialized PolicyOption `gorm:"-"`
|
||||
MasterID string `gorm:"-"`
|
||||
}
|
||||
|
||||
// PolicyOption holds non-public storage policy settings
|
||||
type PolicyOption struct {
|
||||
// Upyun access token
|
||||
Token string `json:"token"`
|
||||
// Allowed file extensions
|
||||
FileType []string `json:"file_type"`
|
||||
// MimeType
|
||||
MimeType string `json:"mimetype"`
|
||||
// OauthRedirect is the OAuth redirect URL
|
||||
OauthRedirect string `json:"od_redirect,omitempty"`
|
||||
// OdProxy is the OneDrive reverse proxy URL
|
||||
OdProxy string `json:"od_proxy,omitempty"`
|
||||
// OdDriver is the OneDrive drive locator
|
||||
OdDriver string `json:"od_driver,omitempty"`
|
||||
// Region is the region code
|
||||
Region string `json:"region,omitempty"`
|
||||
// ServerSideEndpoint is the endpoint used for server-side requests; Policy.Server is used when empty
|
||||
ServerSideEndpoint string `json:"server_side_endpoint,omitempty"`
|
||||
// Chunk size for multipart uploads
|
||||
ChunkSize uint64 `json:"chunk_size,omitempty"`
|
||||
// Whether to reserve space with placeholders during multipart uploads
|
||||
PlaceholderWithSize bool `json:"placeholder_with_size,omitempty"`
|
||||
// Per-second API request limit against the storage backend
|
||||
TPSLimit float64 `json:"tps_limit,omitempty"`
|
||||
// Per-second API request burst limit
|
||||
TPSLimitBurst int `json:"tps_limit_burst,omitempty"`
|
||||
// Set this to `true` to force the request to use path-style addressing,
|
||||
// i.e., `http://s3.amazonaws.com/BUCKET/KEY `
|
||||
S3ForcePathStyle bool `json:"s3_path_style"`
|
||||
// File extensions that support thumbnail generation using native policy API.
|
||||
ThumbExts []string `json:"thumb_exts,omitempty"`
|
||||
}
|
||||
application/migrator/model/setting.go (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// Setting is the v3 system setting model
|
||||
type Setting struct {
|
||||
gorm.Model
|
||||
Type string `gorm:"not null"`
|
||||
Name string `gorm:"unique;not null;index:setting_key"`
|
||||
Value string `gorm:"size:65535"`
|
||||
}
|
||||
application/migrator/model/share.go (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// Share is the v3 share model
|
||||
type Share struct {
|
||||
gorm.Model
|
||||
Password string // share password; empty means the share is not password-protected
|
||||
IsDir bool // whether the shared source is a directory
|
||||
UserID uint // creator user ID
|
||||
SourceID uint // source resource ID
|
||||
Views int // view count
|
||||
Downloads int // download count
|
||||
RemainDownloads int // remaining download quota; negative means unlimited
|
||||
Expires *time.Time // expiration time; nil means never expires
|
||||
PreviewEnabled bool // whether direct preview is allowed
|
||||
SourceName string `gorm:"index:source"` // field used for search
|
||||
|
||||
// Fields ignored by the database
|
||||
User User `gorm:"PRELOAD:false,association_autoupdate:false"`
|
||||
File File `gorm:"PRELOAD:false,association_autoupdate:false"`
|
||||
Folder Folder `gorm:"PRELOAD:false,association_autoupdate:false"`
|
||||
}
|
||||
application/migrator/model/source_link.go (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// SourceLink represent a shared file source link
|
||||
type SourceLink struct {
|
||||
gorm.Model
|
||||
FileID uint // corresponding file ID
|
||||
Name string // name of the file while creating the source link, for annotation
|
||||
Downloads int // download count
|
||||
|
||||
// Associated models
|
||||
File File `gorm:"save_associations:false:false"`
|
||||
}
|
||||
application/migrator/model/tag.go (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// Tag is a user-defined tag
|
||||
type Tag struct {
|
||||
gorm.Model
|
||||
Name string // tag name
|
||||
Icon string // icon identifier
|
||||
Color string // icon color
|
||||
Type int // tag type (file category / directory shortcut)
|
||||
Expression string `gorm:"type:text"` // search expression / shortcut path
|
||||
UserID uint // creator ID
|
||||
}
|
||||
|
||||
const (
|
||||
// FileTagType is a file category tag
|
||||
FileTagType = iota
|
||||
// DirectoryLinkType is a directory shortcut tag
|
||||
DirectoryLinkType
|
||||
)
|
||||
application/migrator/model/task.go (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// Task is the v3 task model
|
||||
type Task struct {
|
||||
gorm.Model
|
||||
Status int // task status
|
||||
Type int // task type
|
||||
UserID uint // initiator UID; 0 means system-initiated
|
||||
Progress int // progress
|
||||
Error string `gorm:"type:text"` // error message
|
||||
Props string `gorm:"type:text"` // task properties
|
||||
}
|
||||
application/migrator/model/user.go (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
const (
|
||||
// Active means the account is in normal state
|
||||
Active = iota
|
||||
// NotActivicated means the account is not yet activated
|
||||
NotActivicated
|
||||
// Baned means the account is banned
|
||||
Baned
|
||||
// OveruseBaned means the account is banned for quota overuse
|
||||
OveruseBaned
|
||||
)
|
||||
|
||||
// User is the v3 user model
|
||||
type User struct {
|
||||
// Table fields
|
||||
gorm.Model
|
||||
Email string `gorm:"type:varchar(100);unique_index"`
|
||||
Nick string `gorm:"size:50"`
|
||||
Password string `json:"-"`
|
||||
Status int
|
||||
GroupID uint
|
||||
Storage uint64
|
||||
TwoFactor string
|
||||
Avatar string
|
||||
Options string `json:"-" gorm:"size:4294967295"`
|
||||
Authn string `gorm:"size:4294967295"`
|
||||
|
||||
// Associated models
|
||||
Group Group `gorm:"save_associations:false:false"`
|
||||
Policy Policy `gorm:"PRELOAD:false,association_autoupdate:false"`
|
||||
|
||||
// Fields ignored by the database
|
||||
OptionsSerialized UserOption `gorm:"-"`
|
||||
}
|
||||
|
||||
// UserOption holds per-user preference fields
|
||||
type UserOption struct {
|
||||
ProfileOff bool `json:"profile_off,omitempty"`
|
||||
PreferredTheme string `json:"preferred_theme,omitempty"`
|
||||
}
|
||||
application/migrator/model/webdav.go (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
)
|
||||
|
||||
// Webdav is a WebDAV application account
|
||||
type Webdav struct {
|
||||
gorm.Model
|
||||
Name string // application name
|
||||
Password string `gorm:"unique_index:password_only_on"` // application password
|
||||
UserID uint `gorm:"unique_index:password_only_on"` // user ID
|
||||
Root string `gorm:"type:text"` // root directory
|
||||
Readonly bool `gorm:"type:bool"` // read-only
|
||||
UseProxy bool `gorm:"type:bool"` // whether to reverse-proxy
|
||||
}
|
||||
application/migrator/node.go (new file, 89 lines)
@@ -0,0 +1,89 @@
|
||||
package migrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/node"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
)
|
||||
|
||||
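// migrateNode converts v3 slave nodes: status and type enums are remapped,
// capabilities are packed into a boolset, and aria2 options are unpacked
// into the v4 NodeSetting structure.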
func (m *Migrator) migrateNode() error {
|
||||
m.l.Info("Migrating nodes...")
|
||||
|
||||
var nodes []model.Node
|
||||
if err := model.DB.Find(&nodes).Error; err != nil {
|
||||
return fmt.Errorf("failed to list v3 nodes: %w", err)
|
||||
}
|
||||
|
||||
for _, n := range nodes {
|
||||
nodeType := node.TypeSlave
|
||||
nodeStatus := node.StatusSuspended
|
||||
if n.Type == model.MasterNodeType {
|
||||
nodeType = node.TypeMaster
|
||||
}
|
||||
if n.Status == model.NodeActive {
|
||||
nodeStatus = node.StatusActive
|
||||
}
|
||||
|
||||
cap := &boolset.BooleanSet{}
|
||||
settings := &types.NodeSetting{
|
||||
Provider: types.DownloaderProviderAria2,
|
||||
}
|
||||
|
||||
if n.Aria2Enabled {
|
||||
boolset.Sets(map[types.NodeCapability]bool{
|
||||
types.NodeCapabilityRemoteDownload: true,
|
||||
}, cap)
|
||||
|
||||
aria2Options := &model.Aria2Option{}
|
||||
if err := json.Unmarshal([]byte(n.Aria2Options), aria2Options); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal aria2 options: %w", err)
|
||||
}
|
||||
|
||||
downloaderOptions := map[string]any{}
|
||||
if aria2Options.Options != "" {
|
||||
if err := json.Unmarshal([]byte(aria2Options.Options), &downloaderOptions); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal aria2 options: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
settings.Aria2Setting = &types.Aria2Setting{
|
||||
Server: aria2Options.Server,
|
||||
Token: aria2Options.Token,
|
||||
Options: downloaderOptions,
|
||||
TempPath: aria2Options.TempPath,
|
||||
}
|
||||
}
|
||||
|
||||
if n.Type == model.MasterNodeType {
|
||||
boolset.Sets(map[types.NodeCapability]bool{
|
||||
types.NodeCapabilityExtractArchive: true,
|
||||
types.NodeCapabilityCreateArchive: true,
|
||||
}, cap)
|
||||
}
|
||||
|
||||
stm := m.v4client.Node.Create().
|
||||
SetRawID(int(n.ID)).
|
||||
SetCreatedAt(formatTime(n.CreatedAt)).
|
||||
SetUpdatedAt(formatTime(n.UpdatedAt)).
|
||||
SetName(n.Name).
|
||||
SetType(nodeType).
|
||||
SetStatus(nodeStatus).
|
||||
SetServer(n.Server).
|
||||
SetSlaveKey(n.SlaveKey).
|
||||
SetCapabilities(cap).
|
||||
SetSettings(settings).
|
||||
SetWeight(n.Rank)
|
||||
|
||||
if err := stm.Exec(context.Background()); err != nil {
|
||||
return fmt.Errorf("failed to create node %q: %w", n.Name, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
application/migrator/policy.go (new file, 196 lines)
@@ -0,0 +1,196 @@
|
||||
package migrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/node"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
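// migratePolicy converts v3 storage policies, derives v4-only settings such
// as custom proxies and thumbnail extension lists, and creates a companion
// slave node for each remote policy. It returns the migrated policy IDs.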
func (m *Migrator) migratePolicy() (map[int]bool, error) {
|
||||
m.l.Info("Migrating storage policies...")
|
||||
var policies []model.Policy
|
||||
if err := model.DB.Find(&policies).Error; err != nil {
|
||||
return nil, fmt.Errorf("failed to list v3 storage policies: %w", err)
|
||||
}
|
||||
|
||||
if m.state.LocalPolicyIDs == nil {
|
||||
m.state.LocalPolicyIDs = make(map[int]bool)
|
||||
}
|
||||
|
||||
if m.state.PolicyIDs == nil {
|
||||
m.state.PolicyIDs = make(map[int]bool)
|
||||
}
|
||||
|
||||
m.l.Info("Found %d v3 storage policies to be migrated.", len(policies))
|
||||
|
||||
// get thumb proxy settings
|
||||
var (
|
||||
thumbProxySettings []model.Setting
|
||||
thumbProxyEnabled bool
|
||||
thumbProxyPolicy []int
|
||||
)
|
||||
if err := model.DB.Where("name in (?)", []string{"thumb_proxy_enabled", "thumb_proxy_policy"}).Find(&thumbProxySettings).Error; err != nil {
|
||||
m.l.Warning("Failed to list v3 thumb proxy settings: %w", err)
|
||||
}
|
||||
|
||||
tx, err := m.v4client.Tx(context.Background())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start transaction: %w", err)
|
||||
}
|
||||
|
||||
for _, s := range thumbProxySettings {
|
||||
if s.Name == "thumb_proxy_enabled" {
|
||||
thumbProxyEnabled = setting.IsTrueValue(s.Value)
|
||||
} else if s.Name == "thumb_proxy_policy" {
|
||||
if err := json.Unmarshal([]byte(s.Value), &thumbProxyPolicy); err != nil {
|
||||
m.l.Warning("Failed to unmarshal v3 thumb proxy policy: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, policy := range policies {
|
||||
m.l.Info("Migrating storage policy %q...", policy.Name)
|
||||
if err := json.Unmarshal([]byte(policy.Options), &policy.OptionsSerialized); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal options for policy %q: %w", policy.Name, err)
|
||||
}
|
||||
|
||||
settings := &types.PolicySetting{
|
||||
Token: policy.OptionsSerialized.Token,
|
||||
FileType: policy.OptionsSerialized.FileType,
|
||||
OauthRedirect: policy.OptionsSerialized.OauthRedirect,
|
||||
OdDriver: policy.OptionsSerialized.OdDriver,
|
||||
Region: policy.OptionsSerialized.Region,
|
||||
ServerSideEndpoint: policy.OptionsSerialized.ServerSideEndpoint,
|
||||
ChunkSize: int64(policy.OptionsSerialized.ChunkSize),
|
||||
TPSLimit: policy.OptionsSerialized.TPSLimit,
|
||||
TPSLimitBurst: policy.OptionsSerialized.TPSLimitBurst,
|
||||
S3ForcePathStyle: policy.OptionsSerialized.S3ForcePathStyle,
|
||||
ThumbExts: policy.OptionsSerialized.ThumbExts,
|
||||
}
|
||||
|
||||
if policy.Type == types.PolicyTypeOd {
|
||||
settings.ThumbSupportAllExts = true
|
||||
} else {
|
||||
switch policy.Type {
|
||||
case types.PolicyTypeCos:
|
||||
settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "heif", "heic"}
|
||||
case types.PolicyTypeOss:
|
||||
settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "heic", "tiff", "avif"}
|
||||
case types.PolicyTypeUpyun:
|
||||
settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "svg"}
|
||||
case types.PolicyTypeQiniu:
|
||||
settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "tiff", "avif", "psd"}
|
||||
case types.PolicyTypeRemote:
|
||||
settings.ThumbExts = []string{"png", "jpg", "jpeg", "gif"}
|
||||
}
|
||||
}
|
||||
|
||||
if policy.Type != types.PolicyTypeOd && policy.BaseURL != "" {
|
||||
settings.CustomProxy = true
|
||||
settings.ProxyServer = policy.BaseURL
|
||||
} else if policy.OptionsSerialized.OdProxy != "" {
|
||||
settings.CustomProxy = true
|
||||
settings.ProxyServer = policy.OptionsSerialized.OdProxy
|
||||
}
|
||||
|
||||
if policy.Type == types.PolicyTypeCos {
|
||||
settings.ChunkSize = 1024 * 1024 * 25
|
||||
}
|
||||
|
||||
if thumbProxyEnabled && lo.Contains(thumbProxyPolicy, int(policy.ID)) {
|
||||
settings.ThumbGeneratorProxy = true
|
||||
}
|
||||
|
||||
mustContain := []string{"{randomkey16}", "{randomkey8}", "{uuid}"}
|
||||
hasRandomElement := false
|
||||
for _, c := range mustContain {
|
||||
if strings.Contains(policy.FileNameRule, c) {
|
||||
hasRandomElement = true
|
||||
break
|
||||
}
|
||||
|
||||
if strings.Contains(policy.DirNameRule, c) {
|
||||
hasRandomElement = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasRandomElement {
|
||||
if policy.DirNameRule == "" {
|
||||
policy.DirNameRule = "uploads/{uid}/{path}"
|
||||
}
|
||||
policy.FileNameRule = "{uid}_{randomkey8}_{originname}"
|
||||
m.l.Warning("Storage policy %q has no random element in file name rule, using default file name rule.", policy.Name)
|
||||
}
|
||||
|
||||
stm := tx.StoragePolicy.Create().
|
||||
SetRawID(int(policy.ID)).
|
||||
SetCreatedAt(formatTime(policy.CreatedAt)).
|
||||
SetUpdatedAt(formatTime(policy.UpdatedAt)).
|
||||
SetName(policy.Name).
|
||||
SetType(policy.Type).
|
||||
SetServer(policy.Server).
|
||||
SetBucketName(policy.BucketName).
|
||||
SetIsPrivate(policy.IsPrivate).
|
||||
SetAccessKey(policy.AccessKey).
|
||||
SetSecretKey(policy.SecretKey).
|
||||
SetMaxSize(int64(policy.MaxSize)).
|
||||
SetDirNameRule(policy.DirNameRule).
|
||||
SetFileNameRule(policy.FileNameRule).
|
||||
SetSettings(settings)
|
||||
|
||||
if policy.Type == types.PolicyTypeRemote {
|
||||
m.l.Info("Storage policy %q is remote, creating node for it...", policy.Name)
|
||||
bs := &boolset.BooleanSet{}
|
||||
n, err := tx.Node.Create().
|
||||
SetName(policy.Name).
|
||||
SetStatus(node.StatusActive).
|
||||
SetServer(policy.Server).
|
||||
SetSlaveKey(policy.SecretKey).
|
||||
SetType(node.TypeSlave).
|
||||
SetCapabilities(bs).
|
||||
SetSettings(&types.NodeSetting{
|
||||
Provider: types.DownloaderProviderAria2,
|
||||
}).
|
||||
Save(context.Background())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create node for storage policy %q: %w", policy.Name, err)
|
||||
}
|
||||
|
||||
stm.SetNodeID(n.ID)
|
||||
}
|
||||
|
||||
if _, err := stm.Save(context.Background()); err != nil {
|
||||
return nil, fmt.Errorf("failed to create storage policy %q: %w", policy.Name, err)
|
||||
}
|
||||
|
||||
m.state.PolicyIDs[int(policy.ID)] = true
|
||||
if policy.Type == types.PolicyTypeLocal {
|
||||
m.state.LocalPolicyIDs[int(policy.ID)] = true
|
||||
}
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
return nil, fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
|
||||
m.l.Info("Resetting storage policy ID sequence for postgres...")
|
||||
m.v4client.StoragePolicy.ExecContext(context.Background(), "SELECT SETVAL('storage_policies_id_seq', (SELECT MAX(id) FROM storage_policies))")
|
||||
}
|
||||
|
||||
if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
|
||||
m.l.Info("Resetting node ID sequence for postgres...")
|
||||
m.v4client.Node.ExecContext(context.Background(), "SELECT SETVAL('nodes_id_seq', (SELECT MAX(id) FROM nodes))")
|
||||
}
|
||||
|
||||
return m.state.PolicyIDs, nil
|
||||
}
|
||||
application/migrator/settings.go (new file, 213 lines)
@@ -0,0 +1,213 @@
|
||||
package migrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
|
||||
)
|
||||
|
||||
// TODO:
|
||||
// 1. Policy thumb proxy migration
|
||||
|
||||
type (
|
||||
settingMigrator func(allSettings map[string]string, name, value string) ([]settingMigrated, error)
|
||||
settingMigrated struct {
|
||||
name string
|
||||
value string
|
||||
}
|
||||
// PackProduct is a storage pack product
|
||||
PackProduct struct {
|
||||
ID int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Size uint64 `json:"size"`
|
||||
Time int64 `json:"time"`
|
||||
Price int `json:"price"`
|
||||
Score int `json:"score"`
|
||||
}
|
||||
GroupProducts struct {
|
||||
ID int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
GroupID uint `json:"group_id"`
|
||||
Time int64 `json:"time"`
|
||||
Price int `json:"price"`
|
||||
Score int `json:"score"`
|
||||
Des []string `json:"des"`
|
||||
Highlight bool `json:"highlight"`
|
||||
}
|
||||
)
|
||||
|
||||
var noopMigrator = func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
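// migrators maps v3 setting names to transformations; settings mapped to
// noopMigrator are dropped, and names absent from this map are copied verbatim.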
var migrators = map[string]settingMigrator{
|
||||
"siteKeywords": noopMigrator,
|
||||
"over_used_template": noopMigrator,
|
||||
"download_timeout": noopMigrator,
|
||||
"preview_timeout": noopMigrator,
|
||||
"doc_preview_timeout": noopMigrator,
|
||||
"slave_node_retry": noopMigrator,
|
||||
"slave_ping_interval": noopMigrator,
|
||||
"slave_recover_interval": noopMigrator,
|
||||
"slave_transfer_timeout": noopMigrator,
|
||||
"onedrive_monitor_timeout": noopMigrator,
|
||||
"onedrive_source_timeout": noopMigrator,
|
||||
"share_download_session_timeout": noopMigrator,
|
||||
"onedrive_callback_check": noopMigrator,
|
||||
"mail_activation_template": noopMigrator,
|
||||
"mail_reset_pwd_template": noopMigrator,
|
||||
"appid": noopMigrator,
|
||||
"appkey": noopMigrator,
|
||||
"wechat_enabled": noopMigrator,
|
||||
"wechat_appid": noopMigrator,
|
||||
"wechat_mchid": noopMigrator,
|
||||
"wechat_serial_no": noopMigrator,
|
||||
"wechat_api_key": noopMigrator,
|
||||
"wechat_pk_content": noopMigrator,
|
||||
"hot_share_num": noopMigrator,
|
||||
"defaultTheme": noopMigrator,
|
||||
"theme_options": noopMigrator,
|
||||
"max_worker_num": noopMigrator,
|
||||
"max_parallel_transfer": noopMigrator,
|
||||
"secret_key": noopMigrator,
|
||||
"avatar_size_m": noopMigrator,
|
||||
"avatar_size_s": noopMigrator,
|
||||
"home_view_method": noopMigrator,
|
||||
"share_view_method": noopMigrator,
|
||||
"cron_recycle_upload_session": noopMigrator,
|
||||
"captcha_type": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
|
||||
if value == "tcaptcha" {
|
||||
value = "normal"
|
||||
}
|
||||
return []settingMigrated{
|
||||
{
|
||||
name: "captcha_type",
|
||||
value: value,
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
"captcha_TCaptcha_CaptchaAppId": noopMigrator,
|
||||
"captcha_TCaptcha_AppSecretKey": noopMigrator,
|
||||
"captcha_TCaptcha_SecretId": noopMigrator,
|
||||
"captcha_TCaptcha_SecretKey": noopMigrator,
|
||||
"thumb_file_suffix": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
|
||||
return []settingMigrated{
|
||||
{
|
||||
name: "thumb_entity_suffix",
|
||||
value: value,
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
"thumb_max_src_size": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
|
||||
return []settingMigrated{
|
||||
{
|
||||
name: "thumb_music_cover_max_size",
|
||||
value: value,
|
||||
},
|
||||
{
|
||||
name: "thumb_libreoffice_max_size",
|
||||
value: value,
|
||||
},
|
||||
{
|
||||
name: "thumb_ffmpeg_max_size",
|
||||
value: value,
|
||||
},
|
||||
{
|
||||
name: "thumb_vips_max_size",
|
||||
value: value,
|
||||
},
|
||||
{
|
||||
name: "thumb_builtin_max_size",
|
||||
value: value,
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
"initial_files": noopMigrator,
|
||||
"office_preview_service": noopMigrator,
|
||||
"phone_required": noopMigrator,
|
||||
"phone_enabled": noopMigrator,
|
||||
"wopi_session_timeout": func(allSettings map[string]string, name, value string) ([]settingMigrated, error) {
|
||||
return []settingMigrated{
|
||||
{
|
||||
name: "viewer_session_timeout",
|
||||
value: value,
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
"custom_payment_enabled": noopMigrator,
|
||||
"custom_payment_endpoint": noopMigrator,
|
||||
"custom_payment_secret": noopMigrator,
|
||||
"custom_payment_name": noopMigrator,
|
||||
}
|
||||
|
||||
func (m *Migrator) migrateSettings() error {
|
||||
m.l.Info("Migrating settings...")
|
||||
// 1. List all settings
|
||||
var settings []model.Setting
|
||||
if err := model.DB.Find(&settings).Error; err != nil {
|
||||
return fmt.Errorf("failed to list v3 settings: %w", err)
|
||||
}
|
||||
|
||||
m.l.Info("Found %d v3 setting pairs to be migrated.", len(settings))
|
||||
|
||||
allSettings := make(map[string]string)
|
||||
for _, s := range settings {
|
||||
allSettings[s.Name] = s.Value
|
||||
}
|
||||
|
||||
migratedSettings := make([]settingMigrated, 0)
|
||||
for _, s := range settings {
|
||||
if s.Name == "thumb_file_suffix" {
|
||||
m.state.ThumbSuffix = s.Value
|
||||
}
|
||||
if s.Name == "avatar_path" {
|
||||
m.state.V3AvatarPath = s.Value
|
||||
}
|
||||
migrator, ok := migrators[s.Name]
|
||||
if ok {
|
||||
newSettings, err := migrator(allSettings, s.Name, s.Value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to migrate setting %q: %w", s.Name, err)
|
||||
}
|
||||
migratedSettings = append(migratedSettings, newSettings...)
|
||||
} else {
|
||||
migratedSettings = append(migratedSettings, settingMigrated{
|
||||
name: s.Name,
|
||||
value: s.Value,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
tx, err := m.v4client.Tx(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start transaction: %w", err)
|
||||
}
|
||||
|
||||
// Insert hash_id_salt
|
||||
if conf.SystemConfig.HashIDSalt != "" {
|
||||
if err := tx.Setting.Create().SetName("hash_id_salt").SetValue(conf.SystemConfig.HashIDSalt).Exec(context.Background()); err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
return fmt.Errorf("failed to rollback transaction: %w", err)
|
||||
}
|
||||
return fmt.Errorf("failed to create setting hash_id_salt: %w", err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("hash ID salt is not set, please set it from v3 conf file")
|
||||
}
|
||||
|
||||
for _, s := range migratedSettings {
|
||||
if err := tx.Setting.Create().SetName(s.name).SetValue(s.value).Exec(context.Background()); err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
return fmt.Errorf("failed to rollback transaction: %w", err)
|
||||
}
|
||||
return fmt.Errorf("failed to create setting %q: %w", s.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
102
application/migrator/share.go
Normal file
102
application/migrator/share.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package migrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/file"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
)
|
||||
|
||||
func (m *Migrator) migrateShare() error {
|
||||
m.l.Info("Migrating shares...")
|
||||
batchSize := 1000
|
||||
offset := m.state.ShareOffset
|
||||
ctx := context.Background()
|
||||
|
||||
if offset > 0 {
|
||||
m.l.Info("Resuming share migration from offset %d", offset)
|
||||
}
|
||||
|
||||
for {
|
||||
m.l.Info("Migrating shares with offset %d", offset)
|
||||
var shares []model.Share
|
||||
if err := model.DB.Limit(batchSize).Offset(offset).Find(&shares).Error; err != nil {
|
||||
return fmt.Errorf("failed to list v3 shares: %w", err)
|
||||
}
|
||||
|
||||
if len(shares) == 0 {
|
||||
if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
|
||||
m.l.Info("Resetting share ID sequence for postgres...")
|
||||
m.v4client.Share.ExecContext(ctx, "SELECT SETVAL('shares_id_seq', (SELECT MAX(id) FROM shares))")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
tx, err := m.v4client.Tx(ctx)
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
return fmt.Errorf("failed to start transaction: %w", err)
|
||||
}
|
||||
|
||||
for _, s := range shares {
|
||||
sourceId := int(s.SourceID)
|
||||
if !s.IsDir {
|
||||
sourceId += m.state.LastFolderID
|
||||
}
|
||||
|
||||
// check if file exists
|
||||
_, err = tx.File.Query().Where(file.ID(sourceId)).First(ctx)
|
||||
if err != nil {
|
||||
m.l.Warning("File %d not found, skipping share %d", sourceId, s.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
// check if user exist
|
||||
if _, ok := m.state.UserIDs[int(s.UserID)]; !ok {
|
||||
m.l.Warning("User %d not found, skipping share %d", s.UserID, s.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
stm := tx.Share.Create().
|
||||
SetCreatedAt(formatTime(s.CreatedAt)).
|
||||
SetUpdatedAt(formatTime(s.UpdatedAt)).
|
||||
SetViews(s.Views).
|
||||
SetRawID(int(s.ID)).
|
||||
SetDownloads(s.Downloads).
|
||||
SetFileID(sourceId).
|
||||
SetUserID(int(s.UserID))
|
||||
|
||||
if s.Password != "" {
|
||||
stm.SetPassword(s.Password)
|
||||
}
|
||||
|
||||
if s.Expires != nil {
|
||||
stm.SetNillableExpires(s.Expires)
|
||||
}
|
||||
|
||||
if s.RemainDownloads >= 0 {
|
||||
stm.SetRemainDownloads(s.RemainDownloads)
|
||||
}
|
||||
|
||||
if _, err := stm.Save(ctx); err != nil {
|
||||
_ = tx.Rollback()
|
||||
return fmt.Errorf("failed to create share %d: %w", s.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
offset += batchSize
|
||||
m.state.ShareOffset = offset
|
||||
if err := m.saveState(); err != nil {
|
||||
m.l.Warning("Failed to save state after share batch: %s", err)
|
||||
} else {
|
||||
m.l.Info("Saved migration state after processing this batch")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
109
application/migrator/user.go
Normal file
109
application/migrator/user.go
Normal file
@@ -0,0 +1,109 @@
|
||||
package migrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/user"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
)
|
||||
|
||||
func (m *Migrator) migrateUser() error {
|
||||
m.l.Info("Migrating users...")
|
||||
batchSize := 1000
|
||||
// Start from the saved offset if available
|
||||
offset := m.state.UserOffset
|
||||
ctx := context.Background()
|
||||
if m.state.UserIDs == nil {
|
||||
m.state.UserIDs = make(map[int]bool)
|
||||
}
|
||||
|
||||
// If we're resuming, load existing user IDs
|
||||
if len(m.state.UserIDs) > 0 {
|
||||
m.l.Info("Resuming user migration from offset %d, %d users already migrated", offset, len(m.state.UserIDs))
|
||||
}
|
||||
|
||||
for {
|
||||
m.l.Info("Migrating users with offset %d", offset)
|
||||
var users []model.User
|
||||
if err := model.DB.Limit(batchSize).Offset(offset).Find(&users).Error; err != nil {
|
||||
return fmt.Errorf("failed to list v3 users: %w", err)
|
||||
}
|
||||
|
||||
if len(users) == 0 {
|
||||
if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
|
||||
m.l.Info("Resetting user ID sequence for postgres...")
|
||||
m.v4client.User.ExecContext(ctx, "SELECT SETVAL('users_id_seq', (SELECT MAX(id) FROM users))")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
tx, err := m.v4client.Tx(context.Background())
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
return fmt.Errorf("failed to start transaction: %w", err)
|
||||
}
|
||||
|
||||
for _, u := range users {
|
||||
userStatus := user.StatusActive
|
||||
switch u.Status {
|
||||
case model.Active:
|
||||
userStatus = user.StatusActive
|
||||
case model.NotActivicated:
|
||||
userStatus = user.StatusInactive
|
||||
case model.Baned:
|
||||
userStatus = user.StatusManualBanned
|
||||
case model.OveruseBaned:
|
||||
userStatus = user.StatusSysBanned
|
||||
}
|
||||
|
||||
setting := &types.UserSetting{
|
||||
VersionRetention: true,
|
||||
VersionRetentionMax: 10,
|
||||
}
|
||||
|
||||
stm := tx.User.Create().
|
||||
SetRawID(int(u.ID)).
|
||||
SetCreatedAt(formatTime(u.CreatedAt)).
|
||||
SetUpdatedAt(formatTime(u.UpdatedAt)).
|
||||
SetEmail(u.Email).
|
||||
SetNick(u.Nick).
|
||||
SetStatus(userStatus).
|
||||
SetStorage(int64(u.Storage)).
|
||||
SetGroupID(int(u.GroupID)).
|
||||
SetSettings(setting).
|
||||
SetPassword(u.Password)
|
||||
|
||||
if u.TwoFactor != "" {
|
||||
stm.SetTwoFactorSecret(u.TwoFactor)
|
||||
}
|
||||
|
||||
if u.Avatar != "" {
|
||||
stm.SetAvatar(u.Avatar)
|
||||
}
|
||||
|
||||
if _, err := stm.Save(ctx); err != nil {
|
||||
_ = tx.Rollback()
|
||||
return fmt.Errorf("failed to create user %d: %w", u.ID, err)
|
||||
}
|
||||
|
||||
m.state.UserIDs[int(u.ID)] = true
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
// Update the offset in state and save after each batch
|
||||
offset += batchSize
|
||||
m.state.UserOffset = offset
|
||||
if err := m.saveState(); err != nil {
|
||||
m.l.Warning("Failed to save state after user batch: %s", err)
|
||||
} else {
|
||||
m.l.Info("Saved migration state after processing %d users", offset)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
93
application/migrator/webdav.go
Normal file
93
application/migrator/webdav.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package migrator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator/model"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
)
|
||||
|
||||
func (m *Migrator) migrateWebdav() error {
|
||||
m.l.Info("Migrating webdav accounts...")
|
||||
|
||||
batchSize := 1000
|
||||
offset := m.state.WebdavOffset
|
||||
ctx := context.Background()
|
||||
|
||||
if m.state.WebdavOffset > 0 {
|
||||
m.l.Info("Resuming webdav migration from offset %d", offset)
|
||||
}
|
||||
|
||||
for {
|
||||
m.l.Info("Migrating webdav accounts with offset %d", offset)
|
||||
var webdavAccounts []model.Webdav
|
||||
if err := model.DB.Limit(batchSize).Offset(offset).Find(&webdavAccounts).Error; err != nil {
|
||||
return fmt.Errorf("failed to list v3 webdav accounts: %w", err)
|
||||
}
|
||||
|
||||
if len(webdavAccounts) == 0 {
|
||||
if m.dep.ConfigProvider().Database().Type == conf.PostgresDB {
|
||||
m.l.Info("Resetting webdav account ID sequence for postgres...")
|
||||
m.v4client.DavAccount.ExecContext(ctx, "SELECT SETVAL('dav_accounts_id_seq', (SELECT MAX(id) FROM dav_accounts))")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
tx, err := m.v4client.Tx(ctx)
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
return fmt.Errorf("failed to start transaction: %w", err)
|
||||
}
|
||||
|
||||
for _, webdavAccount := range webdavAccounts {
|
||||
if _, ok := m.state.UserIDs[int(webdavAccount.UserID)]; !ok {
|
||||
m.l.Warning("User %d not found, skipping webdav account %d", webdavAccount.UserID, webdavAccount.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
props := types.DavAccountProps{}
|
||||
options := boolset.BooleanSet{}
|
||||
|
||||
if webdavAccount.Readonly {
|
||||
boolset.Set(int(types.DavAccountReadOnly), true, &options)
|
||||
}
|
||||
|
||||
if webdavAccount.UseProxy {
|
||||
boolset.Set(int(types.DavAccountProxy), true, &options)
|
||||
}
|
||||
|
||||
stm := tx.DavAccount.Create().
|
||||
SetCreatedAt(formatTime(webdavAccount.CreatedAt)).
|
||||
SetUpdatedAt(formatTime(webdavAccount.UpdatedAt)).
|
||||
SetRawID(int(webdavAccount.ID)).
|
||||
SetName(webdavAccount.Name).
|
||||
SetURI("cloudreve://my" + webdavAccount.Root).
|
||||
SetPassword(webdavAccount.Password).
|
||||
SetProps(&props).
|
||||
SetOptions(&options).
|
||||
SetOwnerID(int(webdavAccount.UserID))
|
||||
|
||||
if _, err := stm.Save(ctx); err != nil {
|
||||
_ = tx.Rollback()
|
||||
return fmt.Errorf("failed to create webdav account %d: %w", webdavAccount.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
offset += batchSize
|
||||
m.state.WebdavOffset = offset
|
||||
if err := m.saveState(); err != nil {
|
||||
m.l.Warning("Failed to save state after webdav batch: %s", err)
|
||||
} else {
|
||||
m.l.Info("Saved migration state after processing this batch")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
433
application/statics/embed.go
Normal file
433
application/statics/embed.go
Normal file
@@ -0,0 +1,433 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package embed provides access to files embedded in the running Go program.
|
||||
//
|
||||
// Go source files that import "embed" can use the //go:embed directive
|
||||
// to initialize a variable of type string, []byte, or FS with the contents of
|
||||
// files read from the package directory or subdirectories at compile time.
|
||||
//
|
||||
// For example, here are three ways to embed a file named hello.txt
|
||||
// and then print its contents at run time.
|
||||
//
|
||||
// Embedding one file into a string:
|
||||
//
|
||||
// import _ "embed"
|
||||
//
|
||||
// //go:embed hello.txt
|
||||
// var s string
|
||||
// print(s)
|
||||
//
|
||||
// Embedding one file into a slice of bytes:
|
||||
//
|
||||
// import _ "embed"
|
||||
//
|
||||
// //go:embed hello.txt
|
||||
// var b []byte
|
||||
// print(string(b))
|
||||
//
|
||||
// Embedded one or more files into a file system:
|
||||
//
|
||||
// import "embed"
|
||||
//
|
||||
// //go:embed hello.txt
|
||||
// var f embed.FS
|
||||
// data, _ := f.ReadFile("hello.txt")
|
||||
// print(string(data))
|
||||
//
|
||||
// # Directives
|
||||
//
|
||||
// A //go:embed directive above a variable declaration specifies which files to embed,
|
||||
// using one or more path.Match patterns.
|
||||
//
|
||||
// The directive must immediately precede a line containing the declaration of a single variable.
|
||||
// Only blank lines and ‘//’ line comments are permitted between the directive and the declaration.
|
||||
//
|
||||
// The type of the variable must be a string type, or a slice of a byte type,
|
||||
// or FS (or an alias of FS).
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// package server
|
||||
//
|
||||
// import "embed"
|
||||
//
|
||||
// // content holds our static web server content.
|
||||
// //go:embed image/* template/*
|
||||
// //go:embed html/index.html
|
||||
// var content embed.FS
|
||||
//
|
||||
// The Go build system will recognize the directives and arrange for the declared variable
|
||||
// (in the example above, content) to be populated with the matching files from the file system.
|
||||
//
|
||||
// The //go:embed directive accepts multiple space-separated patterns for
|
||||
// brevity, but it can also be repeated, to avoid very long lines when there are
|
||||
// many patterns. The patterns are interpreted relative to the package directory
|
||||
// containing the source file. The path separator is a forward slash, even on
|
||||
// Windows systems. Patterns may not contain ‘.’ or ‘..’ or empty path elements,
|
||||
// nor may they begin or end with a slash. To match everything in the current
|
||||
// directory, use ‘*’ instead of ‘.’. To allow for naming files with spaces in
|
||||
// their names, patterns can be written as Go double-quoted or back-quoted
|
||||
// string literals.
|
||||
//
|
||||
// If a pattern names a directory, all files in the subtree rooted at that directory are
|
||||
// embedded (recursively), except that files with names beginning with ‘.’ or ‘_’
|
||||
// are excluded. So the variable in the above example is almost equivalent to:
|
||||
//
|
||||
// // content is our static web server content.
|
||||
// //go:embed image template html/index.html
|
||||
// var content embed.FS
|
||||
//
|
||||
// The difference is that ‘image/*’ embeds ‘image/.tempfile’ while ‘image’ does not.
|
||||
// Neither embeds ‘image/dir/.tempfile’.
|
||||
//
|
||||
// If a pattern begins with the prefix ‘all:’, then the rule for walking directories is changed
|
||||
// to include those files beginning with ‘.’ or ‘_’. For example, ‘all:image’ embeds
|
||||
// both ‘image/.tempfile’ and ‘image/dir/.tempfile’.
|
||||
//
|
||||
// The //go:embed directive can be used with both exported and unexported variables,
|
||||
// depending on whether the package wants to make the data available to other packages.
|
||||
// It can only be used with variables at package scope, not with local variables.
|
||||
//
|
||||
// Patterns must not match files outside the package's module, such as ‘.git/*’ or symbolic links.
|
||||
// Patterns must not match files whose names include the special punctuation characters " * < > ? ` ' | / \ and :.
|
||||
// Matches for empty directories are ignored. After that, each pattern in a //go:embed line
|
||||
// must match at least one file or non-empty directory.
|
||||
//
|
||||
// If any patterns are invalid or have invalid matches, the build will fail.
|
||||
//
|
||||
// # Strings and Bytes
|
||||
//
|
||||
// The //go:embed line for a variable of type string or []byte can have only a single pattern,
|
||||
// and that pattern can match only a single file. The string or []byte is initialized with
|
||||
// the contents of that file.
|
||||
//
|
||||
// The //go:embed directive requires importing "embed", even when using a string or []byte.
|
||||
// In source files that don't refer to embed.FS, use a blank import (import _ "embed").
|
||||
//
|
||||
// # File Systems
|
||||
//
|
||||
// For embedding a single file, a variable of type string or []byte is often best.
|
||||
// The FS type enables embedding a tree of files, such as a directory of static
|
||||
// web server content, as in the example above.
|
||||
//
|
||||
// FS implements the io/fs package's FS interface, so it can be used with any package that
|
||||
// understands file systems, including net/http, text/template, and html/template.
|
||||
//
|
||||
// For example, given the content variable in the example above, we can write:
|
||||
//
|
||||
// http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.FS(content))))
|
||||
//
|
||||
// template.ParseFS(content, "*.tmpl")
|
||||
//
|
||||
// # Tools
|
||||
//
|
||||
// To support tools that analyze Go packages, the patterns found in //go:embed lines
|
||||
// are available in “go list” output. See the EmbedPatterns, TestEmbedPatterns,
|
||||
// and XTestEmbedPatterns fields in the “go help list” output.
|
||||
package statics
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"io/fs"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An FS is a read-only collection of files, usually initialized with a //go:embed directive.
|
||||
// When declared without a //go:embed directive, an FS is an empty file system.
|
||||
//
|
||||
// An FS is a read-only value, so it is safe to use from multiple goroutines
|
||||
// simultaneously and also safe to assign values of type FS to each other.
|
||||
//
|
||||
// FS implements fs.FS, so it can be used with any package that understands
|
||||
// file system interfaces, including net/http, text/template, and html/template.
|
||||
//
|
||||
// See the package documentation for more details about initializing an FS.
|
||||
type FS struct {
|
||||
// The compiler knows the layout of this struct.
|
||||
// See cmd/compile/internal/staticdata's WriteEmbed.
|
||||
//
|
||||
// The files list is sorted by name but not by simple string comparison.
|
||||
// Instead, each file's name takes the form "dir/elem" or "dir/elem/".
|
||||
// The optional trailing slash indicates that the file is itself a directory.
|
||||
// The files list is sorted first by dir (if dir is missing, it is taken to be ".")
|
||||
// and then by base, so this list of files:
|
||||
//
|
||||
// p
|
||||
// q/
|
||||
// q/r
|
||||
// q/s/
|
||||
// q/s/t
|
||||
// q/s/u
|
||||
// q/v
|
||||
// w
|
||||
//
|
||||
// is actually sorted as:
|
||||
//
|
||||
// p # dir=. elem=p
|
||||
// q/ # dir=. elem=q
|
||||
// w/ # dir=. elem=w
|
||||
// q/r # dir=q elem=r
|
||||
// q/s/ # dir=q elem=s
|
||||
// q/v # dir=q elem=v
|
||||
// q/s/t # dir=q/s elem=t
|
||||
// q/s/u # dir=q/s elem=u
|
||||
//
|
||||
// This order brings directory contents together in contiguous sections
|
||||
// of the list, allowing a directory read to use binary search to find
|
||||
// the relevant sequence of entries.
|
||||
files *[]file
|
||||
}
|
||||
|
||||
// split splits the name into dir and elem as described in the
|
||||
// comment in the FS struct above. isDir reports whether the
|
||||
// final trailing slash was present, indicating that name is a directory.
|
||||
func split(name string) (dir, elem string, isDir bool) {
|
||||
if name[len(name)-1] == '/' {
|
||||
isDir = true
|
||||
name = name[:len(name)-1]
|
||||
}
|
||||
i := len(name) - 1
|
||||
for i >= 0 && name[i] != '/' {
|
||||
i--
|
||||
}
|
||||
if i < 0 {
|
||||
return ".", name, isDir
|
||||
}
|
||||
return name[:i], name[i+1:], isDir
|
||||
}
|
||||
|
||||
// trimSlash trims a trailing slash from name, if present,
|
||||
// returning the possibly shortened name.
|
||||
func trimSlash(name string) string {
|
||||
if len(name) > 0 && name[len(name)-1] == '/' {
|
||||
return name[:len(name)-1]
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
var (
|
||||
_ fs.ReadDirFS = FS{}
|
||||
_ fs.ReadFileFS = FS{}
|
||||
)
|
||||
|
||||
// A file is a single file in the FS.
|
||||
// It implements fs.FileInfo and fs.DirEntry.
|
||||
type file struct {
|
||||
// The compiler knows the layout of this struct.
|
||||
// See cmd/compile/internal/staticdata's WriteEmbed.
|
||||
name string
|
||||
data string
|
||||
hash [16]byte // truncated SHA256 hash
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
var (
|
||||
_ fs.FileInfo = (*file)(nil)
|
||||
_ fs.DirEntry = (*file)(nil)
|
||||
)
|
||||
|
||||
func (f *file) Name() string { _, elem, _ := split(f.name); return elem }
|
||||
func (f *file) Size() int64 { return int64(len(f.data)) }
|
||||
func (f *file) ModTime() time.Time { return f.modTime }
|
||||
func (f *file) IsDir() bool { _, _, isDir := split(f.name); return isDir }
|
||||
func (f *file) Sys() any { return nil }
|
||||
func (f *file) Type() fs.FileMode { return f.Mode().Type() }
|
||||
func (f *file) Info() (fs.FileInfo, error) { return f, nil }
|
||||
|
||||
func (f *file) Mode() fs.FileMode {
|
||||
if f.IsDir() {
|
||||
return fs.ModeDir | 0555
|
||||
}
|
||||
return 0444
|
||||
}
|
||||
|
||||
// dotFile is a file for the root directory,
|
||||
// which is omitted from the files list in a FS.
|
||||
var dotFile = &file{name: "./"}
|
||||
|
||||
// lookup returns the named file, or nil if it is not present.
|
||||
func (f FS) lookup(name string) *file {
|
||||
if !fs.ValidPath(name) {
|
||||
// The compiler should never emit a file with an invalid name,
|
||||
// so this check is not strictly necessary (if name is invalid,
|
||||
// we shouldn't find a match below), but it's a good backstop anyway.
|
||||
return nil
|
||||
}
|
||||
if name == "." {
|
||||
return dotFile
|
||||
}
|
||||
if f.files == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Binary search to find where name would be in the list,
|
||||
// and then check if name is at that position.
|
||||
dir, elem, _ := split(name)
|
||||
files := *f.files
|
||||
i := sortSearch(len(files), func(i int) bool {
|
||||
idir, ielem, _ := split(files[i].name)
|
||||
return idir > dir || idir == dir && ielem >= elem
|
||||
})
|
||||
if i < len(files) && trimSlash(files[i].name) == name {
|
||||
return &files[i]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readDir returns the list of files corresponding to the directory dir.
|
||||
func (f FS) readDir(dir string) []file {
|
||||
if f.files == nil {
|
||||
return nil
|
||||
}
|
||||
// Binary search to find where dir starts and ends in the list
|
||||
// and then return that slice of the list.
|
||||
files := *f.files
|
||||
i := sortSearch(len(files), func(i int) bool {
|
||||
idir, _, _ := split(files[i].name)
|
||||
return idir >= dir
|
||||
})
|
||||
j := sortSearch(len(files), func(j int) bool {
|
||||
jdir, _, _ := split(files[j].name)
|
||||
return jdir > dir
|
||||
})
|
||||
return files[i:j]
|
||||
}
|
||||
|
||||
// Open opens the named file for reading and returns it as an fs.File.
|
||||
//
|
||||
// The returned file implements io.Seeker when the file is not a directory.
|
||||
func (f FS) Open(name string) (fs.File, error) {
|
||||
file := f.lookup(name)
|
||||
if file == nil {
|
||||
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
|
||||
}
|
||||
if file.IsDir() {
|
||||
return &openDir{file, f.readDir(name), 0}, nil
|
||||
}
|
||||
return &openFile{file, 0}, nil
|
||||
}
|
||||
|
||||
// ReadDir reads and returns the entire named directory.
|
||||
func (f FS) ReadDir(name string) ([]fs.DirEntry, error) {
|
||||
file, err := f.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dir, ok := file.(*openDir)
|
||||
if !ok {
|
||||
return nil, &fs.PathError{Op: "read", Path: name, Err: errors.New("not a directory")}
|
||||
}
|
||||
list := make([]fs.DirEntry, len(dir.files))
|
||||
for i := range list {
|
||||
list[i] = &dir.files[i]
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// ReadFile reads and returns the content of the named file.
|
||||
func (f FS) ReadFile(name string) ([]byte, error) {
|
||||
file, err := f.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ofile, ok := file.(*openFile)
|
||||
if !ok {
|
||||
return nil, &fs.PathError{Op: "read", Path: name, Err: errors.New("is a directory")}
|
||||
}
|
||||
return []byte(ofile.f.data), nil
|
||||
}
|
||||
|
||||
// An openFile is a regular file open for reading.
|
||||
type openFile struct {
|
||||
f *file // the file itself
|
||||
offset int64 // current read offset
|
||||
}
|
||||
|
||||
var (
|
||||
_ io.Seeker = (*openFile)(nil)
|
||||
)
|
||||
|
||||
func (f *openFile) Close() error { return nil }
|
||||
func (f *openFile) Stat() (fs.FileInfo, error) { return f.f, nil }
|
||||
|
||||
func (f *openFile) Read(b []byte) (int, error) {
|
||||
if f.offset >= int64(len(f.f.data)) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if f.offset < 0 {
|
||||
return 0, &fs.PathError{Op: "read", Path: f.f.name, Err: fs.ErrInvalid}
|
||||
}
|
||||
n := copy(b, f.f.data[f.offset:])
|
||||
f.offset += int64(n)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (f *openFile) Seek(offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
case 0:
|
||||
// offset += 0
|
||||
case 1:
|
||||
offset += f.offset
|
||||
case 2:
|
||||
offset += int64(len(f.f.data))
|
||||
}
|
||||
if offset < 0 || offset > int64(len(f.f.data)) {
|
||||
return 0, &fs.PathError{Op: "seek", Path: f.f.name, Err: fs.ErrInvalid}
|
||||
}
|
||||
f.offset = offset
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
// An openDir is a directory open for reading.
|
||||
type openDir struct {
|
||||
f *file // the directory file itself
|
||||
files []file // the directory contents
|
||||
offset int // the read offset, an index into the files slice
|
||||
}
|
||||
|
||||
func (d *openDir) Close() error { return nil }
|
||||
func (d *openDir) Stat() (fs.FileInfo, error) { return d.f, nil }
|
||||
|
||||
func (d *openDir) Read([]byte) (int, error) {
|
||||
return 0, &fs.PathError{Op: "read", Path: d.f.name, Err: errors.New("is a directory")}
|
||||
}
|
||||
|
||||
func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
|
||||
n := len(d.files) - d.offset
|
||||
if n == 0 {
|
||||
if count <= 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, io.EOF
|
||||
}
|
||||
if count > 0 && n > count {
|
||||
n = count
|
||||
}
|
||||
list := make([]fs.DirEntry, n)
|
||||
for i := range list {
|
||||
list[i] = &d.files[d.offset+i]
|
||||
}
|
||||
d.offset += n
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// sortSearch is like sort.Search, avoiding an import.
|
||||
func sortSearch(n int, f func(int) bool) int {
|
||||
// Define f(-1) == false and f(n) == true.
|
||||
// Invariant: f(i-1) == false, f(j) == true.
|
||||
i, j := 0, n
|
||||
for i < j {
|
||||
h := int(uint(i+j) >> 1) // avoid overflow when computing h
|
||||
// i ≤ h < j
|
||||
if !f(h) {
|
||||
i = h + 1 // preserves f(i-1) == false
|
||||
} else {
|
||||
j = h // preserves f(j) == true
|
||||
}
|
||||
}
|
||||
// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
|
||||
return i
|
||||
}
|
||||
230
application/statics/statics.go
Normal file
230
application/statics/statics.go
Normal file
@@ -0,0 +1,230 @@
|
||||
package statics
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bufio"
|
||||
"crypto/sha256"
|
||||
"debug/buildinfo"
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/constants"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/gin-contrib/static"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const StaticFolder = "statics"
|
||||
|
||||
//go:embed assets.zip
|
||||
var zipContent string
|
||||
|
||||
type GinFS struct {
|
||||
FS http.FileSystem
|
||||
}
|
||||
|
||||
type version struct {
|
||||
Name string `json:"name"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
// Open 打开文件
|
||||
func (b *GinFS) Open(name string) (http.File, error) {
|
||||
return b.FS.Open(name)
|
||||
}
|
||||
|
||||
// Exists 文件是否存在
|
||||
func (b *GinFS) Exists(prefix string, filepath string) bool {
|
||||
if _, err := b.FS.Open(filepath); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// NewServerStaticFS 初始化静态资源文件
|
||||
func NewServerStaticFS(l logging.Logger, statics fs.FS, isPro bool) (static.ServeFileSystem, error) {
|
||||
var staticFS static.ServeFileSystem
|
||||
if util.Exists(util.DataPath(StaticFolder)) {
|
||||
l.Info("Folder with %q already exists, it will be used to serve static files.", util.DataPath(StaticFolder))
|
||||
staticFS = static.LocalFile(util.DataPath(StaticFolder), false)
|
||||
} else {
|
||||
// 初始化静态资源
|
||||
embedFS, err := fs.Sub(statics, "assets/build")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize static resources: %w", err)
|
||||
}
|
||||
|
||||
staticFS = &GinFS{
|
||||
FS: http.FS(embedFS),
|
||||
}
|
||||
}
|
||||
// 检查静态资源的版本
|
||||
f, err := staticFS.Open("version.json")
|
||||
if err != nil {
|
||||
l.Warning("Missing version identifier file in static resources, please delete \"statics\" folder and rebuild it.")
|
||||
return staticFS, nil
|
||||
}
|
||||
|
||||
b, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
l.Warning("Failed to read version identifier file in static resources, please delete \"statics\" folder and rebuild it.")
|
||||
return staticFS, nil
|
||||
}
|
||||
|
||||
var v version
|
||||
if err := json.Unmarshal(b, &v); err != nil {
|
||||
l.Warning("Failed to parse version identifier file in static resources: %s", err)
|
||||
return staticFS, nil
|
||||
}
|
||||
|
||||
staticName := "cloudreve-frontend"
|
||||
if isPro {
|
||||
staticName += "-pro"
|
||||
}
|
||||
|
||||
if v.Name != staticName {
|
||||
l.Error("Static resource version mismatch, please delete \"statics\" folder and rebuild it.")
|
||||
}
|
||||
|
||||
if v.Version != constants.BackendVersion {
|
||||
l.Error("Static resource version mismatch [Current %s, Desired: %s],please delete \"statics\" folder and rebuild it.", v.Version, constants.BackendVersion)
|
||||
}
|
||||
|
||||
return staticFS, nil
|
||||
}
|
||||
|
||||
func NewStaticFS(l logging.Logger) fs.FS {
|
||||
zipReader, err := zip.NewReader(strings.NewReader(zipContent), int64(len(zipContent)))
|
||||
if err != nil {
|
||||
l.Panic("Static resource is not a valid zip file: %s", err)
|
||||
}
|
||||
|
||||
var files []file
|
||||
modTime := getBuildTime()
|
||||
err = fs.WalkDir(zipReader, ".", func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot walk into %q: %w", path, err)
|
||||
}
|
||||
|
||||
if path == "." {
|
||||
return nil
|
||||
}
|
||||
|
||||
f := file{modTime: modTime}
|
||||
if d.IsDir() {
|
||||
f.name = path + "/"
|
||||
} else {
|
||||
f.name = path
|
||||
|
||||
rc, err := zipReader.Open(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canot open %q: %w", path, err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
data, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read %q: %w", path, err)
|
||||
}
|
||||
|
||||
f.data = string(data)
|
||||
|
||||
hash := sha256.Sum256(data)
|
||||
for i := range f.hash {
|
||||
f.hash[i] = ^hash[i]
|
||||
}
|
||||
}
|
||||
files = append(files, f)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
l.Panic("Failed to initialize static resources: %s", err)
|
||||
}
|
||||
|
||||
sort.Slice(files, func(i, j int) bool {
|
||||
fi, fj := files[i], files[j]
|
||||
di, ei, _ := split(fi.name)
|
||||
dj, ej, _ := split(fj.name)
|
||||
|
||||
if di != dj {
|
||||
return di < dj
|
||||
}
|
||||
return ei < ej
|
||||
})
|
||||
|
||||
var embedFS FS
|
||||
embedFS.files = &files
|
||||
return embedFS
|
||||
}
|
||||
|
||||
// Eject 抽离内置静态资源
|
||||
func Eject(l logging.Logger, statics fs.FS) error {
|
||||
// 初始化静态资源
|
||||
embedFS, err := fs.Sub(statics, "assets/build")
|
||||
if err != nil {
|
||||
l.Panic("Failed to initialize static resources: %s", err)
|
||||
}
|
||||
|
||||
var walk func(relPath string, d fs.DirEntry, err error) error
|
||||
walk = func(relPath string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read info of %q: %s, skipping...", relPath, err)
|
||||
}
|
||||
|
||||
if !d.IsDir() {
|
||||
// 写入文件
|
||||
dst := util.DataPath(filepath.Join(StaticFolder, relPath))
|
||||
out, err := util.CreatNestedFile(dst)
|
||||
defer out.Close()
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create file %q: %s, skipping...", dst, err)
|
||||
}
|
||||
|
||||
l.Info("Ejecting %q...", dst)
|
||||
obj, _ := embedFS.Open(relPath)
|
||||
if _, err := io.Copy(out, bufio.NewReader(obj)); err != nil {
|
||||
return fmt.Errorf("cannot write file %q: %s, skipping...", relPath, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// util.Log().Info("开始导出内置静态资源...")
|
||||
err = fs.WalkDir(embedFS, ".", walk)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to eject static resources: %w", err)
|
||||
}
|
||||
|
||||
l.Info("Finish ejecting static resources.")
|
||||
return nil
|
||||
}
|
||||
|
||||
func getBuildTime() (buildTime time.Time) {
|
||||
buildTime = time.Now()
|
||||
exe, err := os.Executable()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
info, err := buildinfo.ReadFile(exe)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, s := range info.Settings {
|
||||
if s.Key == "vcs.time" && s.Value != "" {
|
||||
if t, err := time.Parse(time.RFC3339, s.Value); err == nil {
|
||||
buildTime = t
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
2
assets
2
assets
Submodule assets updated: 53aa9ace96...8f98777045
47
azure-pipelines.yml
Normal file
47
azure-pipelines.yml
Normal file
@@ -0,0 +1,47 @@
|
||||
trigger:
|
||||
tags:
|
||||
include:
|
||||
- '*'
|
||||
variables:
|
||||
GO_VERSION: "1.25.5"
|
||||
NODE_VERSION: "22.x"
|
||||
DOCKER_BUILDKIT: 1
|
||||
|
||||
pool:
|
||||
name: Default
|
||||
|
||||
jobs:
|
||||
- job: Release
|
||||
steps:
|
||||
- checkout: self
|
||||
submodules: true
|
||||
persistCredentials: true
|
||||
- task: NodeTool@0
|
||||
inputs:
|
||||
versionSpec: '$(NODE_VERSION)'
|
||||
displayName: 'Install Node.js'
|
||||
- task: GoTool@0
|
||||
inputs:
|
||||
version: "$(GO_VERSION)"
|
||||
displayName: Install Go
|
||||
- task: Docker@2
|
||||
inputs:
|
||||
containerRegistry: "CR DockerHub"
|
||||
command: "login"
|
||||
addPipelineData: false
|
||||
addBaseImageData: false
|
||||
- task: CmdLine@2
|
||||
displayName: "Install tonistiigi/binfmt"
|
||||
inputs:
|
||||
script: |
|
||||
docker run --privileged --rm tonistiigi/binfmt --install all
|
||||
- task: goreleaser@0
|
||||
condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/'))
|
||||
inputs:
|
||||
version: "latest"
|
||||
distribution: "goreleaser"
|
||||
workdir: "$(Build.SourcesDirectory)"
|
||||
args: "release --timeout 60m -p 4"
|
||||
env:
|
||||
GITHUB_TOKEN: $(GITHUB_TOKEN)
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
package bootstrap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/HFO4/cloudreve/pkg/conf"
|
||||
)
|
||||
|
||||
// InitApplication 初始化应用常量
|
||||
func InitApplication() {
|
||||
fmt.Print(`
|
||||
___ _ _
|
||||
/ __\ | ___ _ _ __| |_ __ _____ _____
|
||||
/ / | |/ _ \| | | |/ _ | '__/ _ \ \ / / _ \
|
||||
/ /___| | (_) | |_| | (_| | | | __/\ V / __/
|
||||
\____/|_|\___/ \__,_|\__,_|_| \___| \_/ \___|
|
||||
|
||||
V` + conf.BackendVersion + ` Commit #` + conf.LastCommit + ` Pro=` + conf.IsPro + `
|
||||
================================================
|
||||
|
||||
`)
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
package bootstrap
|
||||
|
||||
import (
|
||||
model "github.com/HFO4/cloudreve/models"
|
||||
"github.com/HFO4/cloudreve/pkg/aria2"
|
||||
"github.com/HFO4/cloudreve/pkg/auth"
|
||||
"github.com/HFO4/cloudreve/pkg/cache"
|
||||
"github.com/HFO4/cloudreve/pkg/conf"
|
||||
"github.com/HFO4/cloudreve/pkg/crontab"
|
||||
"github.com/HFO4/cloudreve/pkg/email"
|
||||
"github.com/HFO4/cloudreve/pkg/task"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// Init 初始化启动
|
||||
func Init(path string) {
|
||||
InitApplication()
|
||||
conf.Init(path)
|
||||
// Debug 关闭时,切换为生产模式
|
||||
if !conf.SystemConfig.Debug {
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
}
|
||||
cache.Init()
|
||||
if conf.SystemConfig.Mode == "master" {
|
||||
model.Init()
|
||||
task.Init()
|
||||
aria2.Init(false)
|
||||
email.Init()
|
||||
crontab.Init()
|
||||
InitStatic()
|
||||
}
|
||||
auth.Init()
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
package bootstrap
|
||||
|
||||
import (
|
||||
"github.com/HFO4/cloudreve/pkg/util"
|
||||
_ "github.com/HFO4/cloudreve/statik"
|
||||
"github.com/gin-contrib/static"
|
||||
"github.com/rakyll/statik/fs"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type GinFS struct {
|
||||
FS http.FileSystem
|
||||
}
|
||||
|
||||
// StaticFS 内置静态文件资源
|
||||
var StaticFS static.ServeFileSystem
|
||||
|
||||
// Open 打开文件
|
||||
func (b *GinFS) Open(name string) (http.File, error) {
|
||||
return b.FS.Open(name)
|
||||
}
|
||||
|
||||
// Exists 文件是否存在
|
||||
func (b *GinFS) Exists(prefix string, filepath string) bool {
|
||||
|
||||
if _, err := b.FS.Open(filepath); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
|
||||
}
|
||||
|
||||
// InitStatic 初始化静态资源文件
|
||||
func InitStatic() {
|
||||
var err error
|
||||
|
||||
if util.Exists(util.RelativePath("statics")) {
|
||||
util.Log().Info("检测到 statics 目录存在,将使用此目录下的静态资源文件")
|
||||
StaticFS = static.LocalFile(util.RelativePath("statics"), false)
|
||||
} else {
|
||||
StaticFS = &GinFS{}
|
||||
StaticFS.(*GinFS).FS, err = fs.New()
|
||||
if err != nil {
|
||||
util.Log().Panic("无法初始化静态资源, %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
128
build.sh
128
build.sh
@@ -1,128 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
REPO=$(cd $(dirname $0); pwd)
|
||||
COMMIT_SHA=$(git rev-parse --short HEAD)
|
||||
VERSION=$(git describe --tags)
|
||||
ASSETS="false"
|
||||
BINARY="false"
|
||||
RELEASE="false"
|
||||
|
||||
debugInfo () {
|
||||
echo "Repo: $REPO"
|
||||
echo "Build assets: $ASSETS"
|
||||
echo "Build binary: $BINARY"
|
||||
echo "Release: $RELEASE"
|
||||
echo "Version: $VERSION"
|
||||
echo "Commit: $COMMIT_SHA"
|
||||
}
|
||||
|
||||
buildAssets () {
|
||||
cd $REPO
|
||||
rm -rf assets/build
|
||||
rm -f statik/statik.go
|
||||
|
||||
export CI=false
|
||||
|
||||
cd $REPO/assets
|
||||
|
||||
yarn install
|
||||
yarn run build
|
||||
|
||||
if ! [ -x "$(command -v statik)" ]; then
|
||||
export CGO_ENABLED=0
|
||||
go get github.com/rakyll/statik
|
||||
fi
|
||||
|
||||
cd $REPO
|
||||
statik -src=assets/build/ -include=*.html,*.js,*.json,*.css,*.png,*.svg,*.ico -f
|
||||
}
|
||||
|
||||
buildBinary () {
|
||||
cd $REPO
|
||||
go build -a -o cloudreve -ldflags " -X 'github.com/HFO4/cloudreve/pkg/conf.BackendVersion=$VERSION' -X 'github.com/HFO4/cloudreve/pkg/conf.LastCommit=$COMMIT_SHA'"
|
||||
}
|
||||
|
||||
_build() {
|
||||
local osarch=$1
|
||||
IFS=/ read -r -a arr <<<"$osarch"
|
||||
os="${arr[0]}"
|
||||
arch="${arr[1]}"
|
||||
gcc="${arr[2]}"
|
||||
|
||||
# Go build to build the binary.
|
||||
export GOOS=$os
|
||||
export GOARCH=$arch
|
||||
export CC=$gcc
|
||||
export CGO_ENABLED=1
|
||||
|
||||
out="release/cloudreve_${VERSION}_${os}_${arch}"
|
||||
go build -a -o "${out}" -ldflags " -X 'github.com/HFO4/cloudreve/pkg/conf.BackendVersion=$VERSION' -X 'github.com/HFO4/cloudreve/pkg/conf.LastCommit=$COMMIT_SHA'"
|
||||
|
||||
if [ "$os" = "windows" ]; then
|
||||
mv $out release/cloudreve.exe
|
||||
zip -j -q "${out}.zip" release/cloudreve.exe
|
||||
rm -f "release/cloudreve.exe"
|
||||
else
|
||||
mv $out release/cloudreve
|
||||
tar -zcvf "${out}.tar.gz" -C release cloudreve
|
||||
rm -f "release/cloudreve"
|
||||
fi
|
||||
}
|
||||
|
||||
release(){
|
||||
cd $REPO
|
||||
## List of architectures and OS to test coss compilation.
|
||||
SUPPORTED_OSARCH="linux/amd64/gcc linux/arm/arm-linux-gnueabihf-gcc windows/amd64/x86_64-w64-mingw32-gcc"
|
||||
|
||||
echo "Release builds for OS/Arch/CC: ${SUPPORTED_OSARCH}"
|
||||
for each_osarch in ${SUPPORTED_OSARCH}; do
|
||||
_build "${each_osarch}"
|
||||
done
|
||||
}
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 [-a] [-c] [-b] [-r]" 1>&2;
|
||||
exit 1;
|
||||
}
|
||||
|
||||
while getopts "bacr:d" o; do
|
||||
case "${o}" in
|
||||
b)
|
||||
ASSETS="true"
|
||||
BINARY="true"
|
||||
;;
|
||||
a)
|
||||
ASSETS="true"
|
||||
;;
|
||||
c)
|
||||
BINARY="true"
|
||||
;;
|
||||
r)
|
||||
ASSETS="true"
|
||||
RELEASE="true"
|
||||
;;
|
||||
d)
|
||||
DEBUG="true"
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND-1))
|
||||
|
||||
if [ "$DEBUG" = "true" ]; then
|
||||
debugInfo
|
||||
fi
|
||||
|
||||
if [ "$ASSETS" = "true" ]; then
|
||||
buildAssets
|
||||
fi
|
||||
|
||||
if [ "$BINARY" = "true" ]; then
|
||||
buildBinary
|
||||
fi
|
||||
|
||||
if [ "$RELEASE" = "true" ]; then
|
||||
release
|
||||
fi
|
||||
30
cmd/eject.go
Normal file
30
cmd/eject.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/cloudreve/Cloudreve/v4/application/constants"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/statics"
|
||||
"github.com/spf13/cobra"
|
||||
"os"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(ejectCmd)
|
||||
}
|
||||
|
||||
var ejectCmd = &cobra.Command{
|
||||
Use: "eject",
|
||||
Short: "Eject all embedded static files",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dep := dependency.NewDependency(
|
||||
dependency.WithConfigPath(confPath),
|
||||
dependency.WithProFlag(constants.IsPro == "true"),
|
||||
)
|
||||
logger := dep.Logger()
|
||||
|
||||
if err := statics.Eject(dep.Logger(), dep.Statics()); err != nil {
|
||||
logger.Error("Failed to eject static files: %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
},
|
||||
}
|
||||
230
cmd/masterkey.go
Normal file
230
cmd/masterkey.go
Normal file
@@ -0,0 +1,230 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/entity"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
outputToFile string
|
||||
newMasterKeyFile string
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(masterKeyCmd)
|
||||
masterKeyCmd.AddCommand(masterKeyGenerateCmd)
|
||||
masterKeyCmd.AddCommand(masterKeyGetCmd)
|
||||
masterKeyCmd.AddCommand(masterKeyRotateCmd)
|
||||
|
||||
masterKeyGenerateCmd.Flags().StringVarP(&outputToFile, "output", "o", "", "Output master key to file instead of stdout")
|
||||
masterKeyRotateCmd.Flags().StringVarP(&newMasterKeyFile, "new-key", "n", "", "Path to file containing the new master key (base64 encoded).")
|
||||
}
|
||||
|
||||
var masterKeyCmd = &cobra.Command{
|
||||
Use: "master-key",
|
||||
Short: "Master encryption key management",
|
||||
Long: "Manage master encryption keys for file encryption. Use subcommands to generate, get, or rotate keys.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
_ = cmd.Help()
|
||||
},
|
||||
}
|
||||
|
||||
var masterKeyGenerateCmd = &cobra.Command{
|
||||
Use: "generate",
|
||||
Short: "Generate a new master encryption key",
|
||||
Long: "Generate a new random 32-byte (256-bit) master encryption key and output it in base64 format.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
// Generate 32-byte random key
|
||||
key := make([]byte, 32)
|
||||
if _, err := io.ReadFull(rand.Reader, key); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: Failed to generate random key: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Encode to base64
|
||||
encodedKey := base64.StdEncoding.EncodeToString(key)
|
||||
|
||||
if outputToFile != "" {
|
||||
// Write to file
|
||||
if err := os.WriteFile(outputToFile, []byte(encodedKey), 0600); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: Failed to write key to file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Master key generated and saved to: %s\n", outputToFile)
|
||||
} else {
|
||||
// Output to stdout
|
||||
fmt.Println(encodedKey)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var masterKeyGetCmd = &cobra.Command{
|
||||
Use: "get",
|
||||
Short: "Get the current master encryption key",
|
||||
Long: "Retrieve and display the current master encryption key from the configured vault (setting, env, or file).",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
ctx := context.Background()
|
||||
dep := dependency.NewDependency(
|
||||
dependency.WithConfigPath(confPath),
|
||||
)
|
||||
logger := dep.Logger()
|
||||
|
||||
// Get the master key vault
|
||||
vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())
|
||||
|
||||
// Retrieve the master key
|
||||
key, err := vault.GetMasterKey(ctx)
|
||||
if err != nil {
|
||||
logger.Error("Failed to get master key: %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Encode to base64 and display
|
||||
encodedKey := base64.StdEncoding.EncodeToString(key)
|
||||
fmt.Println("")
|
||||
fmt.Println(encodedKey)
|
||||
},
|
||||
}
|
||||
|
||||
var masterKeyRotateCmd = &cobra.Command{
|
||||
Use: "rotate",
|
||||
Short: "Rotate the master encryption key",
|
||||
Long: `Rotate the master encryption key by re-encrypting all encrypted file keys with a new master key.
|
||||
This operation:
|
||||
1. Retrieves the current master key
|
||||
2. Loads a new master key from file
|
||||
3. Re-encrypts all file encryption keys with the new master key
|
||||
4. Updates the master key in the settings database
|
||||
|
||||
Warning: This is a critical operation. Make sure to backup your database before proceeding.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
ctx := context.Background()
|
||||
dep := dependency.NewDependency(
|
||||
dependency.WithConfigPath(confPath),
|
||||
)
|
||||
logger := dep.Logger()
|
||||
|
||||
logger.Info("Starting master key rotation...")
|
||||
|
||||
// Get the old master key
|
||||
vault := encrypt.NewMasterEncryptKeyVault(ctx, dep.SettingProvider())
|
||||
oldMasterKey, err := vault.GetMasterKey(ctx)
|
||||
if err != nil {
|
||||
logger.Error("Failed to get current master key: %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
logger.Info("Retrieved current master key")
|
||||
|
||||
// Get or generate the new master key
|
||||
var newMasterKey []byte
|
||||
// Load from file
|
||||
keyData, err := os.ReadFile(newMasterKeyFile)
|
||||
if err != nil {
|
||||
logger.Error("Failed to read new master key file: %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
newMasterKey, err = base64.StdEncoding.DecodeString(string(keyData))
|
||||
if err != nil {
|
||||
logger.Error("Failed to decode new master key: %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if len(newMasterKey) != 32 {
|
||||
logger.Error("Invalid new master key: must be 32 bytes (256 bits), got %d bytes", len(newMasterKey))
|
||||
os.Exit(1)
|
||||
}
|
||||
logger.Info("Loaded new master key from file: %s", newMasterKeyFile)
|
||||
|
||||
// Query all entities with encryption metadata
|
||||
db := dep.DBClient()
|
||||
entities, err := db.Entity.Query().
|
||||
Where(entity.Not(entity.PropsIsNil())).
|
||||
All(ctx)
|
||||
if err != nil {
|
||||
logger.Error("Failed to query entities: %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
logger.Info("Found %d entities to check for encryption", len(entities))
|
||||
|
||||
// Re-encrypt each entity's encryption key
|
||||
encryptedCount := 0
|
||||
for _, ent := range entities {
|
||||
if ent.Props == nil || ent.Props.EncryptMetadata == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
encMeta := ent.Props.EncryptMetadata
|
||||
|
||||
// Decrypt the file key with old master key
|
||||
decryptedFileKey, err := encrypt.DecryptWithMasterKey(oldMasterKey, encMeta.Key)
|
||||
if err != nil {
|
||||
logger.Error("Failed to decrypt key for entity %d: %s", ent.ID, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Re-encrypt the file key with new master key
|
||||
newEncryptedKey, err := encrypt.EncryptWithMasterKey(newMasterKey, decryptedFileKey)
|
||||
if err != nil {
|
||||
logger.Error("Failed to re-encrypt key for entity %d: %s", ent.ID, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Update the entity
|
||||
newProps := *ent.Props
|
||||
newProps.EncryptMetadata = &types.EncryptMetadata{
|
||||
Algorithm: encMeta.Algorithm,
|
||||
Key: newEncryptedKey,
|
||||
KeyPlainText: nil, // Don't store plaintext
|
||||
IV: encMeta.IV,
|
||||
}
|
||||
|
||||
err = db.Entity.UpdateOne(ent).
|
||||
SetProps(&newProps).
|
||||
Exec(ctx)
|
||||
if err != nil {
|
||||
logger.Error("Failed to update entity %d: %s", ent.ID, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
encryptedCount++
|
||||
}
|
||||
|
||||
logger.Info("Re-encrypted %d file keys", encryptedCount)
|
||||
|
||||
// Update the master key in settings
|
||||
keyStore := dep.SettingProvider().MasterEncryptKeyVault(ctx)
|
||||
if keyStore == setting.MasterEncryptKeyVaultTypeSetting {
|
||||
encodedNewKey := base64.StdEncoding.EncodeToString(newMasterKey)
|
||||
err = dep.SettingClient().Set(ctx, map[string]string{
|
||||
"encrypt_master_key": encodedNewKey,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("Failed to update master key in settings: %s", err)
|
||||
logger.Error("WARNING: File keys have been re-encrypted but master key update failed!")
|
||||
logger.Error("Please manually update the encrypt_master_key setting.")
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
logger.Info("Current master key is stored in %q", keyStore)
|
||||
if keyStore == setting.MasterEncryptKeyVaultTypeEnv {
|
||||
logger.Info("Please update the new master encryption key in your \"CR_ENCRYPT_MASTER_KEY\" environment variable.")
|
||||
} else if keyStore == setting.MasterEncryptKeyVaultTypeFile {
|
||||
logger.Info("Please update the new master encryption key in your key file: %q", dep.SettingProvider().MasterEncryptKeyFile(ctx))
|
||||
}
|
||||
logger.Info("Last step: Please manually update the new master encryption key in your ENV or key file.")
|
||||
}
|
||||
|
||||
logger.Info("Master key rotation completed successfully")
|
||||
},
|
||||
}
|
||||
69
cmd/migrate.go
Normal file
69
cmd/migrate.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/constants"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/migrator"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
v3ConfPath string
|
||||
forceReset bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(migrateCmd)
|
||||
migrateCmd.PersistentFlags().StringVar(&v3ConfPath, "v3-conf", "", "Path to the v3 config file")
|
||||
migrateCmd.PersistentFlags().BoolVar(&forceReset, "force-reset", false, "Force reset migration state and start from beginning")
|
||||
}
|
||||
|
||||
var migrateCmd = &cobra.Command{
|
||||
Use: "migrate",
|
||||
Short: "Migrate from v3 to v4",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dep := dependency.NewDependency(
|
||||
dependency.WithConfigPath(confPath),
|
||||
dependency.WithRequiredDbVersion(constants.BackendVersion),
|
||||
dependency.WithProFlag(constants.IsPro == "true"),
|
||||
)
|
||||
logger := dep.Logger()
|
||||
logger.Info("Migrating from v3 to v4...")
|
||||
|
||||
if v3ConfPath == "" {
|
||||
logger.Error("v3 config file is required, please use -v3-conf to specify the path.")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Check if state file exists and warn about resuming
|
||||
stateFilePath := filepath.Join(filepath.Dir(v3ConfPath), "migration_state.json")
|
||||
if util.Exists(stateFilePath) && !forceReset {
|
||||
logger.Info("Found existing migration state file at %s. Migration will resume from the last successful step.", stateFilePath)
|
||||
logger.Info("If you want to start migration from the beginning, please use --force-reset flag.")
|
||||
} else if forceReset && util.Exists(stateFilePath) {
|
||||
logger.Info("Force resetting migration state. Will start from the beginning.")
|
||||
if err := os.Remove(stateFilePath); err != nil {
|
||||
logger.Error("Failed to remove migration state file: %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
migrator, err := migrator.NewMigrator(dep, v3ConfPath)
|
||||
if err != nil {
|
||||
logger.Error("Failed to create migrator: %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err := migrator.Migrate(); err != nil {
|
||||
logger.Error("Failed to migrate: %s", err)
|
||||
logger.Info("Migration failed but state has been saved. You can retry with the same command to resume from the last successful step.")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
logger.Info("Migration from v3 to v4 completed successfully.")
|
||||
},
|
||||
}
|
||||
cmd/root.go (Normal file, 44 lines)
@@ -0,0 +1,44 @@
package cmd

import (
	"fmt"
	"os"

	"github.com/cloudreve/Cloudreve/v4/pkg/util"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

var (
	confPath   string
	licenseKey string
)

func init() {
	rootCmd.PersistentFlags().StringVarP(&confPath, "conf", "c", util.DataPath("conf.ini"), "Path to the config file")
	rootCmd.PersistentFlags().BoolVarP(&util.UseWorkingDir, "use-working-dir", "w", false, "Use working directory, instead of executable directory")
}

var rootCmd = &cobra.Command{
	Use:   "cloudreve",
	Short: "Cloudreve is a server-side self-hosted cloud storage platform",
	Long: `Self-hosted file management and sharing system, supports multiple storage providers.
Complete documentation is available at https://docs.cloudreve.org/`,
	Run: func(cmd *cobra.Command, args []string) {
		// Do Stuff Here
	},
}

func Execute() {
	cmd, _, err := rootCmd.Find(os.Args[1:])
	// redirect to default server cmd if no cmd is given
	if err == nil && cmd.Use == rootCmd.Use && cmd.Flags().Parse(os.Args[1:]) != pflag.ErrHelp {
		args := append([]string{"server"}, os.Args[1:]...)
		rootCmd.SetArgs(args)
	}

	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
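Execute rewrites a bare "cloudreve ..." invocation into "cloudreve server ..." when no known subcommand is named, so running the binary with only flags still starts the server. Below is a stand-alone sketch of the same cobra pattern in isolation; the "demo" and "serve" command names are illustrative only.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

var root = &cobra.Command{Use: "demo", Short: "demo of a default subcommand"}

var serve = &cobra.Command{
	Use:   "serve",
	Short: "the implicit default",
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("serve invoked with args:", args)
	},
}

func main() {
	root.AddCommand(serve)

	// Find reports the root command itself when the first argument is not a
	// known subcommand; in that case prepend the default one, mirroring the
	// Execute function in cmd/root.go above.
	cmd, _, err := root.Find(os.Args[1:])
	if err == nil && cmd.Use == root.Use && cmd.Flags().Parse(os.Args[1:]) != pflag.ErrHelp {
		root.SetArgs(append([]string{"serve"}, os.Args[1:]...))
	}

	if err := root.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}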
cmd/server.go (Normal file, 55 lines)
@@ -0,0 +1,55 @@
package cmd

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/cloudreve/Cloudreve/v4/application"
	"github.com/cloudreve/Cloudreve/v4/application/constants"
	"github.com/cloudreve/Cloudreve/v4/application/dependency"
	"github.com/cloudreve/Cloudreve/v4/pkg/logging"
	"github.com/spf13/cobra"
)

func init() {
	rootCmd.AddCommand(serverCmd)
	serverCmd.PersistentFlags().StringVarP(&licenseKey, "license-key", "l", "", "License key of your Cloudreve Pro")
}

var serverCmd = &cobra.Command{
	Use:   "server",
	Short: "Start a Cloudreve server with the given config file",
	Run: func(cmd *cobra.Command, args []string) {
		dep := dependency.NewDependency(
			dependency.WithConfigPath(confPath),
			dependency.WithProFlag(constants.IsProBool),
			dependency.WithRequiredDbVersion(constants.BackendVersion),
		)
		server := application.NewServer(dep)
		logger := dep.Logger()

		server.PrintBanner()

		// Graceful shutdown after receiving a signal.
		sigChan := make(chan os.Signal, 1)
		signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT)
		go shutdown(sigChan, logger, server)

		if err := server.Start(); err != nil {
			logger.Error("Failed to start server: %s", err)
			os.Exit(1)
		}

		defer func() {
			<-sigChan
		}()
	},
}

func shutdown(sigChan chan os.Signal, logger logging.Logger, server application.Server) {
	sig := <-sigChan
	logger.Info("Signal %s received, shutting down server...", sig)
	server.Close()
	close(sigChan)
}
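The server command hands SIGINT/SIGTERM/SIGHUP/SIGQUIT to a goroutine that closes the running server. As a generic illustration of that signal-driven shutdown pattern, here is a self-contained sketch using a plain net/http server instead of Cloudreve's application.Server; it is not the project's code, only the same idea in miniature.

package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":5212"}

	// Buffered channel so the signal is not dropped if it arrives before the
	// shutdown goroutine is ready to receive it.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT)

	go func() {
		sig := <-sigChan
		log.Printf("signal %s received, shutting down...", sig)
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		_ = srv.Shutdown(ctx) // stop accepting new connections, drain in-flight ones
	}()

	// ListenAndServe blocks until Shutdown is called; ErrServerClosed is the
	// expected result of a clean shutdown.
	if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		log.Fatalf("server error: %s", err)
	}
	log.Println("server stopped")
}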
docker-compose.yml (Normal file, 47 lines)
@@ -0,0 +1,47 @@
services:
  cloudreve:
    image: cloudreve/cloudreve:latest
    container_name: cloudreve-backend
    depends_on:
      - postgresql
      - redis
    restart: unless-stopped
    ports:
      - 5212:5212
      - 6888:6888
      - 6888:6888/udp
    environment:
      - CR_CONF_Database.Type=postgres
      - CR_CONF_Database.Host=postgresql
      - CR_CONF_Database.User=cloudreve
      - CR_CONF_Database.Name=cloudreve
      - CR_CONF_Database.Port=5432
      - CR_CONF_Redis.Server=redis:6379
    volumes:
      - backend_data:/cloudreve/data

  postgresql:
    # Best practice: pin to a major version.
    # NOTE: for major version jumps,
    # back up first and consult https://www.postgresql.org/docs/current/pgupgrade.html
    image: postgres:17
    container_name: postgresql
    restart: unless-stopped
    environment:
      - POSTGRES_USER=cloudreve
      - POSTGRES_DB=cloudreve
      - POSTGRES_HOST_AUTH_METHOD=trust
    volumes:
      - database_postgres:/var/lib/postgresql/data

  redis:
    image: redis:latest
    container_name: redis
    restart: unless-stopped
    volumes:
      - redis_data:/data

volumes:
  backend_data:
  database_postgres:
  redis_data:
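The compose file configures the backend entirely through CR_CONF_ environment variables, where the part after the prefix reads as Section.Key of the ini config file (for example, CR_CONF_Database.Type=postgres corresponds to Type under [Database]). How Cloudreve parses these internally is not shown in this diff; the sketch below is a hypothetical illustration of turning such variables into section/key/value triples, not the project's own parsing code.

package main

import (
	"fmt"
	"os"
	"strings"
)

// override is a hypothetical representation of one CR_CONF_ variable.
type override struct {
	Section, Key, Value string
}

// parseOverrides scans the environment for CR_CONF_Section.Key=value entries
// and splits them into their three parts, skipping anything malformed.
func parseOverrides(environ []string) []override {
	var out []override
	for _, kv := range environ {
		if !strings.HasPrefix(kv, "CR_CONF_") {
			continue
		}
		name, value, ok := strings.Cut(strings.TrimPrefix(kv, "CR_CONF_"), "=")
		if !ok {
			continue
		}
		section, key, ok := strings.Cut(name, ".")
		if !ok {
			continue
		}
		out = append(out, override{Section: section, Key: key, Value: value})
	}
	return out
}

func main() {
	os.Setenv("CR_CONF_Database.Type", "postgres")
	for _, o := range parseOverrides(os.Environ()) {
		fmt.Printf("[%s] %s = %s\n", o.Section, o.Key, o.Value)
	}
}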
ent/client.go (Normal file, 3137 lines)
File diff suppressed because it is too large.
ent/davaccount.go (Normal file, 242 lines)
@@ -0,0 +1,242 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/user"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
)
|
||||
|
||||
// DavAccount is the model entity for the DavAccount schema.
|
||||
type DavAccount struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int `json:"id,omitempty"`
|
||||
// CreatedAt holds the value of the "created_at" field.
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// UpdatedAt holds the value of the "updated_at" field.
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
// DeletedAt holds the value of the "deleted_at" field.
|
||||
DeletedAt *time.Time `json:"deleted_at,omitempty"`
|
||||
// Name holds the value of the "name" field.
|
||||
Name string `json:"name,omitempty"`
|
||||
// URI holds the value of the "uri" field.
|
||||
URI string `json:"uri,omitempty"`
|
||||
// Password holds the value of the "password" field.
|
||||
Password string `json:"-"`
|
||||
// Options holds the value of the "options" field.
|
||||
Options *boolset.BooleanSet `json:"options,omitempty"`
|
||||
// Props holds the value of the "props" field.
|
||||
Props *types.DavAccountProps `json:"props,omitempty"`
|
||||
// OwnerID holds the value of the "owner_id" field.
|
||||
OwnerID int `json:"owner_id,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the DavAccountQuery when eager-loading is set.
|
||||
Edges DavAccountEdges `json:"edges"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// DavAccountEdges holds the relations/edges for other nodes in the graph.
|
||||
type DavAccountEdges struct {
|
||||
// Owner holds the value of the owner edge.
|
||||
Owner *User `json:"owner,omitempty"`
|
||||
// loadedTypes holds the information for reporting if a
|
||||
// type was loaded (or requested) in eager-loading or not.
|
||||
loadedTypes [1]bool
|
||||
}
|
||||
|
||||
// OwnerOrErr returns the Owner value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e DavAccountEdges) OwnerOrErr() (*User, error) {
|
||||
if e.loadedTypes[0] {
|
||||
if e.Owner == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: user.Label}
|
||||
}
|
||||
return e.Owner, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "owner"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*DavAccount) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case davaccount.FieldProps:
|
||||
values[i] = new([]byte)
|
||||
case davaccount.FieldOptions:
|
||||
values[i] = new(boolset.BooleanSet)
|
||||
case davaccount.FieldID, davaccount.FieldOwnerID:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case davaccount.FieldName, davaccount.FieldURI, davaccount.FieldPassword:
|
||||
values[i] = new(sql.NullString)
|
||||
case davaccount.FieldCreatedAt, davaccount.FieldUpdatedAt, davaccount.FieldDeletedAt:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the DavAccount fields.
|
||||
func (da *DavAccount) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case davaccount.FieldID:
|
||||
value, ok := values[i].(*sql.NullInt64)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected type %T for field id", value)
|
||||
}
|
||||
da.ID = int(value.Int64)
|
||||
case davaccount.FieldCreatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||
} else if value.Valid {
|
||||
da.CreatedAt = value.Time
|
||||
}
|
||||
case davaccount.FieldUpdatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||
} else if value.Valid {
|
||||
da.UpdatedAt = value.Time
|
||||
}
|
||||
case davaccount.FieldDeletedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
|
||||
} else if value.Valid {
|
||||
da.DeletedAt = new(time.Time)
|
||||
*da.DeletedAt = value.Time
|
||||
}
|
||||
case davaccount.FieldName:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field name", values[i])
|
||||
} else if value.Valid {
|
||||
da.Name = value.String
|
||||
}
|
||||
case davaccount.FieldURI:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field uri", values[i])
|
||||
} else if value.Valid {
|
||||
da.URI = value.String
|
||||
}
|
||||
case davaccount.FieldPassword:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field password", values[i])
|
||||
} else if value.Valid {
|
||||
da.Password = value.String
|
||||
}
|
||||
case davaccount.FieldOptions:
|
||||
if value, ok := values[i].(*boolset.BooleanSet); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field options", values[i])
|
||||
} else if value != nil {
|
||||
da.Options = value
|
||||
}
|
||||
case davaccount.FieldProps:
|
||||
if value, ok := values[i].(*[]byte); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field props", values[i])
|
||||
} else if value != nil && len(*value) > 0 {
|
||||
if err := json.Unmarshal(*value, &da.Props); err != nil {
|
||||
return fmt.Errorf("unmarshal field props: %w", err)
|
||||
}
|
||||
}
|
||||
case davaccount.FieldOwnerID:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field owner_id", values[i])
|
||||
} else if value.Valid {
|
||||
da.OwnerID = int(value.Int64)
|
||||
}
|
||||
default:
|
||||
da.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the DavAccount.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (da *DavAccount) Value(name string) (ent.Value, error) {
|
||||
return da.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryOwner queries the "owner" edge of the DavAccount entity.
|
||||
func (da *DavAccount) QueryOwner() *UserQuery {
|
||||
return NewDavAccountClient(da.config).QueryOwner(da)
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this DavAccount.
|
||||
// Note that you need to call DavAccount.Unwrap() before calling this method if this DavAccount
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (da *DavAccount) Update() *DavAccountUpdateOne {
|
||||
return NewDavAccountClient(da.config).UpdateOne(da)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the DavAccount entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (da *DavAccount) Unwrap() *DavAccount {
|
||||
_tx, ok := da.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: DavAccount is not a transactional entity")
|
||||
}
|
||||
da.config.driver = _tx.drv
|
||||
return da
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (da *DavAccount) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("DavAccount(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", da.ID))
|
||||
builder.WriteString("created_at=")
|
||||
builder.WriteString(da.CreatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("updated_at=")
|
||||
builder.WriteString(da.UpdatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
if v := da.DeletedAt; v != nil {
|
||||
builder.WriteString("deleted_at=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("name=")
|
||||
builder.WriteString(da.Name)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("uri=")
|
||||
builder.WriteString(da.URI)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("password=<sensitive>")
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("options=")
|
||||
builder.WriteString(fmt.Sprintf("%v", da.Options))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("props=")
|
||||
builder.WriteString(fmt.Sprintf("%v", da.Props))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("owner_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", da.OwnerID))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// SetOwner manually set the edge as loaded state.
|
||||
func (e *DavAccount) SetOwner(v *User) {
|
||||
e.Edges.Owner = v
|
||||
e.Edges.loadedTypes[0] = true
|
||||
}
|
||||
|
||||
// DavAccounts is a parsable slice of DavAccount.
|
||||
type DavAccounts []*DavAccount
|
||||
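The generated DavAccount entity above is consumed through the equally generated ent client (ent/client.go, suppressed in this diff). As a rough usage sketch, assuming an *ent.Client has already been opened elsewhere, listing a user's WebDAV accounts with the owner edge eager-loaded looks roughly like this; the listAccounts helper is illustrative and not code from this repository.

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
)

// listAccounts is an illustrative helper: fetch all DavAccounts owned by uid,
// with the Owner edge populated so OwnerOrErr() succeeds afterwards.
func listAccounts(ctx context.Context, client *ent.Client, uid int) ([]*ent.DavAccount, error) {
	return client.DavAccount.
		Query().
		Where(davaccount.OwnerID(uid)).
		WithOwner().
		All(ctx)
}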
ent/davaccount/davaccount.go (Normal file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package davaccount
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
|
||||
// Label holds the string label denoting the davaccount type in the database.
|
||||
Label = "dav_account"
|
||||
// FieldID holds the string denoting the id field in the database.
|
||||
FieldID = "id"
|
||||
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||
FieldCreatedAt = "created_at"
|
||||
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||
FieldUpdatedAt = "updated_at"
|
||||
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
|
||||
FieldDeletedAt = "deleted_at"
|
||||
// FieldName holds the string denoting the name field in the database.
|
||||
FieldName = "name"
|
||||
// FieldURI holds the string denoting the uri field in the database.
|
||||
FieldURI = "uri"
|
||||
// FieldPassword holds the string denoting the password field in the database.
|
||||
FieldPassword = "password"
|
||||
// FieldOptions holds the string denoting the options field in the database.
|
||||
FieldOptions = "options"
|
||||
// FieldProps holds the string denoting the props field in the database.
|
||||
FieldProps = "props"
|
||||
// FieldOwnerID holds the string denoting the owner_id field in the database.
|
||||
FieldOwnerID = "owner_id"
|
||||
// EdgeOwner holds the string denoting the owner edge name in mutations.
|
||||
EdgeOwner = "owner"
|
||||
// Table holds the table name of the davaccount in the database.
|
||||
Table = "dav_accounts"
|
||||
// OwnerTable is the table that holds the owner relation/edge.
|
||||
OwnerTable = "dav_accounts"
|
||||
// OwnerInverseTable is the table name for the User entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "user" package.
|
||||
OwnerInverseTable = "users"
|
||||
// OwnerColumn is the table column denoting the owner relation/edge.
|
||||
OwnerColumn = "owner_id"
|
||||
)
|
||||
|
||||
// Columns holds all SQL columns for davaccount fields.
|
||||
var Columns = []string{
|
||||
FieldID,
|
||||
FieldCreatedAt,
|
||||
FieldUpdatedAt,
|
||||
FieldDeletedAt,
|
||||
FieldName,
|
||||
FieldURI,
|
||||
FieldPassword,
|
||||
FieldOptions,
|
||||
FieldProps,
|
||||
FieldOwnerID,
|
||||
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Note that the variables below are initialized by the runtime
|
||||
// package on the initialization of the application. Therefore,
|
||||
// it should be imported in the main as follows:
|
||||
//
|
||||
// import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
|
||||
var (
|
||||
Hooks [1]ent.Hook
|
||||
Interceptors [1]ent.Interceptor
|
||||
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||
DefaultCreatedAt func() time.Time
|
||||
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||
DefaultUpdatedAt func() time.Time
|
||||
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||
UpdateDefaultUpdatedAt func() time.Time
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the DavAccount queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedAt orders the results by the created_at field.
|
||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUpdatedAt orders the results by the updated_at field.
|
||||
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByDeletedAt orders the results by the deleted_at field.
|
||||
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByName orders the results by the name field.
|
||||
func ByName(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldName, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByURI orders the results by the uri field.
|
||||
func ByURI(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldURI, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByPassword orders the results by the password field.
|
||||
func ByPassword(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldPassword, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByOwnerID orders the results by the owner_id field.
|
||||
func ByOwnerID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldOwnerID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByOwnerField orders the results by owner field.
|
||||
func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
func newOwnerStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(OwnerInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
|
||||
)
|
||||
}
|
||||
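The OrderOption helpers defined above plug straight into the generated query builder. A short, hedged example of sorting accounts by creation time, newest first, again assuming an open *ent.Client and the usual ent-generated Order method:

package example

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
)

// newestFirst is an illustrative helper: the generated Query().Order()
// accepts the davaccount.By* OrderOptions shown in this file.
func newestFirst(ctx context.Context, client *ent.Client) ([]*ent.DavAccount, error) {
	return client.DavAccount.
		Query().
		Order(davaccount.ByCreatedAt(sql.OrderDesc())).
		All(ctx)
}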
ent/davaccount/where.go (Normal file, 530 lines)
@@ -0,0 +1,530 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package davaccount
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
)
|
||||
|
||||
// ID filters vertices based on their ID field.
|
||||
func ID(id int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDEQ applies the EQ predicate on the ID field.
|
||||
func IDEQ(id int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDNEQ applies the NEQ predicate on the ID field.
|
||||
func IDNEQ(id int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDIn applies the In predicate on the ID field.
|
||||
func IDIn(ids ...int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDNotIn applies the NotIn predicate on the ID field.
|
||||
func IDNotIn(ids ...int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNotIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDGT applies the GT predicate on the ID field.
|
||||
func IDGT(id int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDGTE applies the GTE predicate on the ID field.
|
||||
func IDGTE(id int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGTE(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLT applies the LT predicate on the ID field.
|
||||
func IDLT(id int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLTE applies the LTE predicate on the ID field.
|
||||
func IDLTE(id int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLTE(FieldID, id))
|
||||
}
|
||||
|
||||
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||
func CreatedAt(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||
func UpdatedAt(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
|
||||
func DeletedAt(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
|
||||
func Name(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// URI applies equality check predicate on the "uri" field. It's identical to URIEQ.
|
||||
func URI(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldURI, v))
|
||||
}
|
||||
|
||||
// Password applies equality check predicate on the "password" field. It's identical to PasswordEQ.
|
||||
func Password(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldPassword, v))
|
||||
}
|
||||
|
||||
// Options applies equality check predicate on the "options" field. It's identical to OptionsEQ.
|
||||
func Options(v *boolset.BooleanSet) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldOptions, v))
|
||||
}
|
||||
|
||||
// OwnerID applies equality check predicate on the "owner_id" field. It's identical to OwnerIDEQ.
|
||||
func OwnerID(v int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldOwnerID, v))
|
||||
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||
func CreatedAtEQ(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||
func CreatedAtNEQ(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||
func CreatedAtIn(vs ...time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||
func CreatedAtNotIn(vs ...time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||
func CreatedAtGT(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||
func CreatedAtGTE(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||
func CreatedAtLT(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||
func CreatedAtLTE(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||
func UpdatedAtEQ(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||
func UpdatedAtNEQ(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||
func UpdatedAtIn(vs ...time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||
func UpdatedAtNotIn(vs ...time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||
func UpdatedAtGT(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||
func UpdatedAtGTE(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||
func UpdatedAtLT(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||
func UpdatedAtLTE(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
|
||||
func DeletedAtEQ(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
|
||||
func DeletedAtNEQ(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNEQ(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtIn applies the In predicate on the "deleted_at" field.
|
||||
func DeletedAtIn(vs ...time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldIn(FieldDeletedAt, vs...))
|
||||
}
|
||||
|
||||
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
|
||||
func DeletedAtNotIn(vs ...time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNotIn(FieldDeletedAt, vs...))
|
||||
}
|
||||
|
||||
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
|
||||
func DeletedAtGT(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGT(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
|
||||
func DeletedAtGTE(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGTE(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
|
||||
func DeletedAtLT(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLT(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
|
||||
func DeletedAtLTE(v time.Time) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLTE(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
|
||||
func DeletedAtIsNil() predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldIsNull(FieldDeletedAt))
|
||||
}
|
||||
|
||||
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
|
||||
func DeletedAtNotNil() predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNotNull(FieldDeletedAt))
|
||||
}
|
||||
|
||||
// NameEQ applies the EQ predicate on the "name" field.
|
||||
func NameEQ(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// NameNEQ applies the NEQ predicate on the "name" field.
|
||||
func NameNEQ(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// NameIn applies the In predicate on the "name" field.
|
||||
func NameIn(vs ...string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldIn(FieldName, vs...))
|
||||
}
|
||||
|
||||
// NameNotIn applies the NotIn predicate on the "name" field.
|
||||
func NameNotIn(vs ...string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNotIn(FieldName, vs...))
|
||||
}
|
||||
|
||||
// NameGT applies the GT predicate on the "name" field.
|
||||
func NameGT(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGT(FieldName, v))
|
||||
}
|
||||
|
||||
// NameGTE applies the GTE predicate on the "name" field.
|
||||
func NameGTE(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGTE(FieldName, v))
|
||||
}
|
||||
|
||||
// NameLT applies the LT predicate on the "name" field.
|
||||
func NameLT(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLT(FieldName, v))
|
||||
}
|
||||
|
||||
// NameLTE applies the LTE predicate on the "name" field.
|
||||
func NameLTE(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLTE(FieldName, v))
|
||||
}
|
||||
|
||||
// NameContains applies the Contains predicate on the "name" field.
|
||||
func NameContains(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldContains(FieldName, v))
|
||||
}
|
||||
|
||||
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
|
||||
func NameHasPrefix(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldHasPrefix(FieldName, v))
|
||||
}
|
||||
|
||||
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
|
||||
func NameHasSuffix(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldHasSuffix(FieldName, v))
|
||||
}
|
||||
|
||||
// NameEqualFold applies the EqualFold predicate on the "name" field.
|
||||
func NameEqualFold(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEqualFold(FieldName, v))
|
||||
}
|
||||
|
||||
// NameContainsFold applies the ContainsFold predicate on the "name" field.
|
||||
func NameContainsFold(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldContainsFold(FieldName, v))
|
||||
}
|
||||
|
||||
// URIEQ applies the EQ predicate on the "uri" field.
|
||||
func URIEQ(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldURI, v))
|
||||
}
|
||||
|
||||
// URINEQ applies the NEQ predicate on the "uri" field.
|
||||
func URINEQ(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNEQ(FieldURI, v))
|
||||
}
|
||||
|
||||
// URIIn applies the In predicate on the "uri" field.
|
||||
func URIIn(vs ...string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldIn(FieldURI, vs...))
|
||||
}
|
||||
|
||||
// URINotIn applies the NotIn predicate on the "uri" field.
|
||||
func URINotIn(vs ...string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNotIn(FieldURI, vs...))
|
||||
}
|
||||
|
||||
// URIGT applies the GT predicate on the "uri" field.
|
||||
func URIGT(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGT(FieldURI, v))
|
||||
}
|
||||
|
||||
// URIGTE applies the GTE predicate on the "uri" field.
|
||||
func URIGTE(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGTE(FieldURI, v))
|
||||
}
|
||||
|
||||
// URILT applies the LT predicate on the "uri" field.
|
||||
func URILT(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLT(FieldURI, v))
|
||||
}
|
||||
|
||||
// URILTE applies the LTE predicate on the "uri" field.
|
||||
func URILTE(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLTE(FieldURI, v))
|
||||
}
|
||||
|
||||
// URIContains applies the Contains predicate on the "uri" field.
|
||||
func URIContains(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldContains(FieldURI, v))
|
||||
}
|
||||
|
||||
// URIHasPrefix applies the HasPrefix predicate on the "uri" field.
|
||||
func URIHasPrefix(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldHasPrefix(FieldURI, v))
|
||||
}
|
||||
|
||||
// URIHasSuffix applies the HasSuffix predicate on the "uri" field.
|
||||
func URIHasSuffix(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldHasSuffix(FieldURI, v))
|
||||
}
|
||||
|
||||
// URIEqualFold applies the EqualFold predicate on the "uri" field.
|
||||
func URIEqualFold(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEqualFold(FieldURI, v))
|
||||
}
|
||||
|
||||
// URIContainsFold applies the ContainsFold predicate on the "uri" field.
|
||||
func URIContainsFold(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldContainsFold(FieldURI, v))
|
||||
}
|
||||
|
||||
// PasswordEQ applies the EQ predicate on the "password" field.
|
||||
func PasswordEQ(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldPassword, v))
|
||||
}
|
||||
|
||||
// PasswordNEQ applies the NEQ predicate on the "password" field.
|
||||
func PasswordNEQ(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNEQ(FieldPassword, v))
|
||||
}
|
||||
|
||||
// PasswordIn applies the In predicate on the "password" field.
|
||||
func PasswordIn(vs ...string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldIn(FieldPassword, vs...))
|
||||
}
|
||||
|
||||
// PasswordNotIn applies the NotIn predicate on the "password" field.
|
||||
func PasswordNotIn(vs ...string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNotIn(FieldPassword, vs...))
|
||||
}
|
||||
|
||||
// PasswordGT applies the GT predicate on the "password" field.
|
||||
func PasswordGT(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGT(FieldPassword, v))
|
||||
}
|
||||
|
||||
// PasswordGTE applies the GTE predicate on the "password" field.
|
||||
func PasswordGTE(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGTE(FieldPassword, v))
|
||||
}
|
||||
|
||||
// PasswordLT applies the LT predicate on the "password" field.
|
||||
func PasswordLT(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLT(FieldPassword, v))
|
||||
}
|
||||
|
||||
// PasswordLTE applies the LTE predicate on the "password" field.
|
||||
func PasswordLTE(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLTE(FieldPassword, v))
|
||||
}
|
||||
|
||||
// PasswordContains applies the Contains predicate on the "password" field.
|
||||
func PasswordContains(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldContains(FieldPassword, v))
|
||||
}
|
||||
|
||||
// PasswordHasPrefix applies the HasPrefix predicate on the "password" field.
|
||||
func PasswordHasPrefix(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldHasPrefix(FieldPassword, v))
|
||||
}
|
||||
|
||||
// PasswordHasSuffix applies the HasSuffix predicate on the "password" field.
|
||||
func PasswordHasSuffix(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldHasSuffix(FieldPassword, v))
|
||||
}
|
||||
|
||||
// PasswordEqualFold applies the EqualFold predicate on the "password" field.
|
||||
func PasswordEqualFold(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEqualFold(FieldPassword, v))
|
||||
}
|
||||
|
||||
// PasswordContainsFold applies the ContainsFold predicate on the "password" field.
|
||||
func PasswordContainsFold(v string) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldContainsFold(FieldPassword, v))
|
||||
}
|
||||
|
||||
// OptionsEQ applies the EQ predicate on the "options" field.
|
||||
func OptionsEQ(v *boolset.BooleanSet) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldOptions, v))
|
||||
}
|
||||
|
||||
// OptionsNEQ applies the NEQ predicate on the "options" field.
|
||||
func OptionsNEQ(v *boolset.BooleanSet) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNEQ(FieldOptions, v))
|
||||
}
|
||||
|
||||
// OptionsIn applies the In predicate on the "options" field.
|
||||
func OptionsIn(vs ...*boolset.BooleanSet) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldIn(FieldOptions, vs...))
|
||||
}
|
||||
|
||||
// OptionsNotIn applies the NotIn predicate on the "options" field.
|
||||
func OptionsNotIn(vs ...*boolset.BooleanSet) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNotIn(FieldOptions, vs...))
|
||||
}
|
||||
|
||||
// OptionsGT applies the GT predicate on the "options" field.
|
||||
func OptionsGT(v *boolset.BooleanSet) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGT(FieldOptions, v))
|
||||
}
|
||||
|
||||
// OptionsGTE applies the GTE predicate on the "options" field.
|
||||
func OptionsGTE(v *boolset.BooleanSet) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldGTE(FieldOptions, v))
|
||||
}
|
||||
|
||||
// OptionsLT applies the LT predicate on the "options" field.
|
||||
func OptionsLT(v *boolset.BooleanSet) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLT(FieldOptions, v))
|
||||
}
|
||||
|
||||
// OptionsLTE applies the LTE predicate on the "options" field.
|
||||
func OptionsLTE(v *boolset.BooleanSet) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldLTE(FieldOptions, v))
|
||||
}
|
||||
|
||||
// PropsIsNil applies the IsNil predicate on the "props" field.
|
||||
func PropsIsNil() predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldIsNull(FieldProps))
|
||||
}
|
||||
|
||||
// PropsNotNil applies the NotNil predicate on the "props" field.
|
||||
func PropsNotNil() predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNotNull(FieldProps))
|
||||
}
|
||||
|
||||
// OwnerIDEQ applies the EQ predicate on the "owner_id" field.
|
||||
func OwnerIDEQ(v int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldEQ(FieldOwnerID, v))
|
||||
}
|
||||
|
||||
// OwnerIDNEQ applies the NEQ predicate on the "owner_id" field.
|
||||
func OwnerIDNEQ(v int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNEQ(FieldOwnerID, v))
|
||||
}
|
||||
|
||||
// OwnerIDIn applies the In predicate on the "owner_id" field.
|
||||
func OwnerIDIn(vs ...int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldIn(FieldOwnerID, vs...))
|
||||
}
|
||||
|
||||
// OwnerIDNotIn applies the NotIn predicate on the "owner_id" field.
|
||||
func OwnerIDNotIn(vs ...int) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.FieldNotIn(FieldOwnerID, vs...))
|
||||
}
|
||||
|
||||
// HasOwner applies the HasEdge predicate on the "owner" edge.
|
||||
func HasOwner() predicate.DavAccount {
|
||||
return predicate.DavAccount(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates).
|
||||
func HasOwnerWith(preds ...predicate.User) predicate.DavAccount {
|
||||
return predicate.DavAccount(func(s *sql.Selector) {
|
||||
step := newOwnerStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.DavAccount) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.DavAccount) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.DavAccount) predicate.DavAccount {
|
||||
return predicate.DavAccount(sql.NotPredicates(p))
|
||||
}
|
||||
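These predicates compose with And/Or/Not and with the HasOwnerWith edge predicate. Below is a hedged sketch of combining them: not soft-deleted, owned by a given user, and matching either a URI prefix or a case-insensitive name. The activeAccounts helper is an assumption for illustration; only user.IDEQ is relied on from the User package, since its other generated predicates are not shown here.

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
	"github.com/cloudreve/Cloudreve/v4/ent/user"
)

// activeAccounts is an illustrative helper combining the generated predicates;
// multiple arguments to Where are ANDed together.
func activeAccounts(ctx context.Context, client *ent.Client, uid int, uriPrefix, name string) ([]*ent.DavAccount, error) {
	return client.DavAccount.
		Query().
		Where(
			davaccount.DeletedAtIsNil(),
			davaccount.HasOwnerWith(user.IDEQ(uid)),
			davaccount.Or(
				davaccount.URIHasPrefix(uriPrefix),
				davaccount.NameEqualFold(name),
			),
		).
		All(ctx)
}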
ent/davaccount_create.go (Normal file, 968 lines)
@@ -0,0 +1,968 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/user"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
)
|
||||
|
||||
// DavAccountCreate is the builder for creating a DavAccount entity.
|
||||
type DavAccountCreate struct {
|
||||
config
|
||||
mutation *DavAccountMutation
|
||||
hooks []Hook
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// SetCreatedAt sets the "created_at" field.
|
||||
func (dac *DavAccountCreate) SetCreatedAt(t time.Time) *DavAccountCreate {
|
||||
dac.mutation.SetCreatedAt(t)
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
|
||||
func (dac *DavAccountCreate) SetNillableCreatedAt(t *time.Time) *DavAccountCreate {
|
||||
if t != nil {
|
||||
dac.SetCreatedAt(*t)
|
||||
}
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (dac *DavAccountCreate) SetUpdatedAt(t time.Time) *DavAccountCreate {
|
||||
dac.mutation.SetUpdatedAt(t)
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
|
||||
func (dac *DavAccountCreate) SetNillableUpdatedAt(t *time.Time) *DavAccountCreate {
|
||||
if t != nil {
|
||||
dac.SetUpdatedAt(*t)
|
||||
}
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (dac *DavAccountCreate) SetDeletedAt(t time.Time) *DavAccountCreate {
|
||||
dac.mutation.SetDeletedAt(t)
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
|
||||
func (dac *DavAccountCreate) SetNillableDeletedAt(t *time.Time) *DavAccountCreate {
|
||||
if t != nil {
|
||||
dac.SetDeletedAt(*t)
|
||||
}
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (dac *DavAccountCreate) SetName(s string) *DavAccountCreate {
|
||||
dac.mutation.SetName(s)
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetURI sets the "uri" field.
|
||||
func (dac *DavAccountCreate) SetURI(s string) *DavAccountCreate {
|
||||
dac.mutation.SetURI(s)
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetPassword sets the "password" field.
|
||||
func (dac *DavAccountCreate) SetPassword(s string) *DavAccountCreate {
|
||||
dac.mutation.SetPassword(s)
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetOptions sets the "options" field.
|
||||
func (dac *DavAccountCreate) SetOptions(bs *boolset.BooleanSet) *DavAccountCreate {
|
||||
dac.mutation.SetOptions(bs)
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetProps sets the "props" field.
|
||||
func (dac *DavAccountCreate) SetProps(tap *types.DavAccountProps) *DavAccountCreate {
|
||||
dac.mutation.SetProps(tap)
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetOwnerID sets the "owner_id" field.
|
||||
func (dac *DavAccountCreate) SetOwnerID(i int) *DavAccountCreate {
|
||||
dac.mutation.SetOwnerID(i)
|
||||
return dac
|
||||
}
|
||||
|
||||
// SetOwner sets the "owner" edge to the User entity.
|
||||
func (dac *DavAccountCreate) SetOwner(u *User) *DavAccountCreate {
|
||||
return dac.SetOwnerID(u.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the DavAccountMutation object of the builder.
|
||||
func (dac *DavAccountCreate) Mutation() *DavAccountMutation {
|
||||
return dac.mutation
|
||||
}
|
||||
|
||||
// Save creates the DavAccount in the database.
|
||||
func (dac *DavAccountCreate) Save(ctx context.Context) (*DavAccount, error) {
|
||||
if err := dac.defaults(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return withHooks(ctx, dac.sqlSave, dac.mutation, dac.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (dac *DavAccountCreate) SaveX(ctx context.Context) *DavAccount {
|
||||
v, err := dac.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (dac *DavAccountCreate) Exec(ctx context.Context) error {
|
||||
_, err := dac.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dac *DavAccountCreate) ExecX(ctx context.Context) {
|
||||
if err := dac.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (dac *DavAccountCreate) defaults() error {
|
||||
if _, ok := dac.mutation.CreatedAt(); !ok {
|
||||
if davaccount.DefaultCreatedAt == nil {
|
||||
return fmt.Errorf("ent: uninitialized davaccount.DefaultCreatedAt (forgotten import ent/runtime?)")
|
||||
}
|
||||
v := davaccount.DefaultCreatedAt()
|
||||
dac.mutation.SetCreatedAt(v)
|
||||
}
|
||||
if _, ok := dac.mutation.UpdatedAt(); !ok {
|
||||
if davaccount.DefaultUpdatedAt == nil {
|
||||
return fmt.Errorf("ent: uninitialized davaccount.DefaultUpdatedAt (forgotten import ent/runtime?)")
|
||||
}
|
||||
v := davaccount.DefaultUpdatedAt()
|
||||
dac.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (dac *DavAccountCreate) check() error {
|
||||
if _, ok := dac.mutation.CreatedAt(); !ok {
|
||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DavAccount.created_at"`)}
|
||||
}
|
||||
if _, ok := dac.mutation.UpdatedAt(); !ok {
|
||||
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "DavAccount.updated_at"`)}
|
||||
}
|
||||
if _, ok := dac.mutation.Name(); !ok {
|
||||
return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "DavAccount.name"`)}
|
||||
}
|
||||
if _, ok := dac.mutation.URI(); !ok {
|
||||
return &ValidationError{Name: "uri", err: errors.New(`ent: missing required field "DavAccount.uri"`)}
|
||||
}
|
||||
if _, ok := dac.mutation.Password(); !ok {
|
||||
return &ValidationError{Name: "password", err: errors.New(`ent: missing required field "DavAccount.password"`)}
|
||||
}
|
||||
if _, ok := dac.mutation.Options(); !ok {
|
||||
return &ValidationError{Name: "options", err: errors.New(`ent: missing required field "DavAccount.options"`)}
|
||||
}
|
||||
if _, ok := dac.mutation.OwnerID(); !ok {
|
||||
return &ValidationError{Name: "owner_id", err: errors.New(`ent: missing required field "DavAccount.owner_id"`)}
|
||||
}
|
||||
if _, ok := dac.mutation.OwnerID(); !ok {
|
||||
return &ValidationError{Name: "owner", err: errors.New(`ent: missing required edge "DavAccount.owner"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dac *DavAccountCreate) sqlSave(ctx context.Context) (*DavAccount, error) {
|
||||
if err := dac.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := dac.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, dac.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
id := _spec.ID.Value.(int64)
|
||||
_node.ID = int(id)
|
||||
dac.mutation.id = &_node.ID
|
||||
dac.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (dac *DavAccountCreate) createSpec() (*DavAccount, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &DavAccount{config: dac.config}
|
||||
_spec = sqlgraph.NewCreateSpec(davaccount.Table, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
|
||||
)
|
||||
|
||||
if id, ok := dac.mutation.ID(); ok {
|
||||
_node.ID = id
|
||||
id64 := int64(id)
|
||||
_spec.ID.Value = id64
|
||||
}
|
||||
|
||||
_spec.OnConflict = dac.conflict
|
||||
if value, ok := dac.mutation.CreatedAt(); ok {
|
||||
_spec.SetField(davaccount.FieldCreatedAt, field.TypeTime, value)
|
||||
_node.CreatedAt = value
|
||||
}
|
||||
if value, ok := dac.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(davaccount.FieldUpdatedAt, field.TypeTime, value)
|
||||
_node.UpdatedAt = value
|
||||
}
|
||||
if value, ok := dac.mutation.DeletedAt(); ok {
|
||||
_spec.SetField(davaccount.FieldDeletedAt, field.TypeTime, value)
|
||||
_node.DeletedAt = &value
|
||||
}
|
||||
if value, ok := dac.mutation.Name(); ok {
|
||||
_spec.SetField(davaccount.FieldName, field.TypeString, value)
|
||||
_node.Name = value
|
||||
}
|
||||
if value, ok := dac.mutation.URI(); ok {
|
||||
_spec.SetField(davaccount.FieldURI, field.TypeString, value)
|
||||
_node.URI = value
|
||||
}
|
||||
if value, ok := dac.mutation.Password(); ok {
|
||||
_spec.SetField(davaccount.FieldPassword, field.TypeString, value)
|
||||
_node.Password = value
|
||||
}
|
||||
if value, ok := dac.mutation.Options(); ok {
|
||||
_spec.SetField(davaccount.FieldOptions, field.TypeBytes, value)
|
||||
_node.Options = value
|
||||
}
|
||||
if value, ok := dac.mutation.Props(); ok {
|
||||
_spec.SetField(davaccount.FieldProps, field.TypeJSON, value)
|
||||
_node.Props = value
|
||||
}
|
||||
if nodes := dac.mutation.OwnerIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: davaccount.OwnerTable,
|
||||
Columns: []string{davaccount.OwnerColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_node.OwnerID = nodes[0]
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||
// of the `INSERT` statement. For example:
|
||||
//
|
||||
// client.DavAccount.Create().
|
||||
// SetCreatedAt(v).
|
||||
// OnConflict(
|
||||
// // Update the row with the new values
|
||||
// // the was proposed for insertion.
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// // Override some of the fields with custom
|
||||
// // update values.
|
||||
// Update(func(u *ent.DavAccountUpsert) {
|
||||
// SetCreatedAt(v+v).
|
||||
// }).
|
||||
// Exec(ctx)
|
||||
func (dac *DavAccountCreate) OnConflict(opts ...sql.ConflictOption) *DavAccountUpsertOne {
|
||||
dac.conflict = opts
|
||||
return &DavAccountUpsertOne{
|
||||
create: dac,
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.DavAccount.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (dac *DavAccountCreate) OnConflictColumns(columns ...string) *DavAccountUpsertOne {
|
||||
dac.conflict = append(dac.conflict, sql.ConflictColumns(columns...))
|
||||
return &DavAccountUpsertOne{
|
||||
create: dac,
|
||||
}
|
||||
}
|
||||
|
||||
type (
|
||||
// DavAccountUpsertOne is the builder for "upsert"-ing
|
||||
// one DavAccount node.
|
||||
DavAccountUpsertOne struct {
|
||||
create *DavAccountCreate
|
||||
}
|
||||
|
||||
// DavAccountUpsert is the "OnConflict" setter.
|
||||
DavAccountUpsert struct {
|
||||
*sql.UpdateSet
|
||||
}
|
||||
)
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *DavAccountUpsert) SetUpdatedAt(v time.Time) *DavAccountUpsert {
|
||||
u.Set(davaccount.FieldUpdatedAt, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsert) UpdateUpdatedAt() *DavAccountUpsert {
|
||||
u.SetExcluded(davaccount.FieldUpdatedAt)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (u *DavAccountUpsert) SetDeletedAt(v time.Time) *DavAccountUpsert {
|
||||
u.Set(davaccount.FieldDeletedAt, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsert) UpdateDeletedAt() *DavAccountUpsert {
|
||||
u.SetExcluded(davaccount.FieldDeletedAt)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||
func (u *DavAccountUpsert) ClearDeletedAt() *DavAccountUpsert {
|
||||
u.SetNull(davaccount.FieldDeletedAt)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (u *DavAccountUpsert) SetName(v string) *DavAccountUpsert {
|
||||
u.Set(davaccount.FieldName, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateName sets the "name" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsert) UpdateName() *DavAccountUpsert {
|
||||
u.SetExcluded(davaccount.FieldName)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetURI sets the "uri" field.
|
||||
func (u *DavAccountUpsert) SetURI(v string) *DavAccountUpsert {
|
||||
u.Set(davaccount.FieldURI, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateURI sets the "uri" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsert) UpdateURI() *DavAccountUpsert {
|
||||
u.SetExcluded(davaccount.FieldURI)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetPassword sets the "password" field.
|
||||
func (u *DavAccountUpsert) SetPassword(v string) *DavAccountUpsert {
|
||||
u.Set(davaccount.FieldPassword, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdatePassword sets the "password" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsert) UpdatePassword() *DavAccountUpsert {
|
||||
u.SetExcluded(davaccount.FieldPassword)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetOptions sets the "options" field.
|
||||
func (u *DavAccountUpsert) SetOptions(v *boolset.BooleanSet) *DavAccountUpsert {
|
||||
u.Set(davaccount.FieldOptions, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateOptions sets the "options" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsert) UpdateOptions() *DavAccountUpsert {
|
||||
u.SetExcluded(davaccount.FieldOptions)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetProps sets the "props" field.
|
||||
func (u *DavAccountUpsert) SetProps(v *types.DavAccountProps) *DavAccountUpsert {
|
||||
u.Set(davaccount.FieldProps, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateProps sets the "props" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsert) UpdateProps() *DavAccountUpsert {
|
||||
u.SetExcluded(davaccount.FieldProps)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearProps clears the value of the "props" field.
|
||||
func (u *DavAccountUpsert) ClearProps() *DavAccountUpsert {
|
||||
u.SetNull(davaccount.FieldProps)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetOwnerID sets the "owner_id" field.
|
||||
func (u *DavAccountUpsert) SetOwnerID(v int) *DavAccountUpsert {
|
||||
u.Set(davaccount.FieldOwnerID, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateOwnerID sets the "owner_id" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsert) UpdateOwnerID() *DavAccountUpsert {
|
||||
u.SetExcluded(davaccount.FieldOwnerID)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.DavAccount.Create().
|
||||
// OnConflict(
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// Exec(ctx)
|
||||
func (u *DavAccountUpsertOne) UpdateNewValues() *DavAccountUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||
if _, exists := u.create.mutation.CreatedAt(); exists {
|
||||
s.SetIgnore(davaccount.FieldCreatedAt)
|
||||
}
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.DavAccount.Create().
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *DavAccountUpsertOne) Ignore() *DavAccountUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *DavAccountUpsertOne) DoNothing() *DavAccountUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding the fields' `UPDATE` values. See the DavAccountCreate.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *DavAccountUpsertOne) Update(set func(*DavAccountUpsert)) *DavAccountUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&DavAccountUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *DavAccountUpsertOne) SetUpdatedAt(v time.Time) *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetUpdatedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertOne) UpdateUpdatedAt() *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateUpdatedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (u *DavAccountUpsertOne) SetDeletedAt(v time.Time) *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetDeletedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertOne) UpdateDeletedAt() *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateDeletedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||
func (u *DavAccountUpsertOne) ClearDeletedAt() *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.ClearDeletedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (u *DavAccountUpsertOne) SetName(v string) *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetName(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateName sets the "name" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertOne) UpdateName() *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateName()
|
||||
})
|
||||
}
|
||||
|
||||
// SetURI sets the "uri" field.
|
||||
func (u *DavAccountUpsertOne) SetURI(v string) *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetURI(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateURI sets the "uri" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertOne) UpdateURI() *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateURI()
|
||||
})
|
||||
}
|
||||
|
||||
// SetPassword sets the "password" field.
|
||||
func (u *DavAccountUpsertOne) SetPassword(v string) *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetPassword(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdatePassword sets the "password" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertOne) UpdatePassword() *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdatePassword()
|
||||
})
|
||||
}
|
||||
|
||||
// SetOptions sets the "options" field.
|
||||
func (u *DavAccountUpsertOne) SetOptions(v *boolset.BooleanSet) *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetOptions(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateOptions sets the "options" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertOne) UpdateOptions() *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateOptions()
|
||||
})
|
||||
}
|
||||
|
||||
// SetProps sets the "props" field.
|
||||
func (u *DavAccountUpsertOne) SetProps(v *types.DavAccountProps) *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetProps(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateProps sets the "props" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertOne) UpdateProps() *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateProps()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearProps clears the value of the "props" field.
|
||||
func (u *DavAccountUpsertOne) ClearProps() *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.ClearProps()
|
||||
})
|
||||
}
|
||||
|
||||
// SetOwnerID sets the "owner_id" field.
|
||||
func (u *DavAccountUpsertOne) SetOwnerID(v int) *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetOwnerID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateOwnerID sets the "owner_id" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertOne) UpdateOwnerID() *DavAccountUpsertOne {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateOwnerID()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *DavAccountUpsertOne) Exec(ctx context.Context) error {
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for DavAccountCreate.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *DavAccountUpsertOne) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// ID executes the UPSERT query and returns the inserted/updated ID.
|
||||
func (u *DavAccountUpsertOne) ID(ctx context.Context) (id int, err error) {
|
||||
node, err := u.create.Save(ctx)
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
return node.ID, nil
|
||||
}
|
||||
|
||||
// IDX is like ID, but panics if an error occurs.
|
||||
func (u *DavAccountUpsertOne) IDX(ctx context.Context) int {
|
||||
id, err := u.ID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
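// Illustrative usage sketch (not generated code): the upsert helpers above are reached by
// chaining from DavAccountCreate. The example is hypothetical; it assumes an initialized
// *Client named "client", a context "ctx", and that the matching field setters exist on
// DavAccountCreate (they are defined outside this hunk):
//
//	id, err := client.DavAccount.Create().
//		SetName("backup").
//		SetURI("cloudreve://my/folder").
//		SetPassword("secret").
//		SetOwnerID(1).
//		OnConflictColumns(davaccount.FieldID). // conflict target
//		UpdateNewValues().                     // keep the values proposed on create
//		ID(ctx)                                // inserted/updated primary key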
func (m *DavAccountCreate) SetRawID(t int) *DavAccountCreate {
|
||||
m.mutation.SetRawID(t)
|
||||
return m
|
||||
}
|
||||
|
||||
// DavAccountCreateBulk is the builder for creating many DavAccount entities in bulk.
|
||||
type DavAccountCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*DavAccountCreate
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// Save creates the DavAccount entities in the database.
|
||||
func (dacb *DavAccountCreateBulk) Save(ctx context.Context) ([]*DavAccount, error) {
|
||||
if dacb.err != nil {
|
||||
return nil, dacb.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(dacb.builders))
|
||||
nodes := make([]*DavAccount, len(dacb.builders))
|
||||
mutators := make([]Mutator, len(dacb.builders))
|
||||
for i := range dacb.builders {
|
||||
func(i int, root context.Context) {
|
||||
builder := dacb.builders[i]
|
||||
builder.defaults()
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutation, ok := m.(*DavAccountMutation)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
if err := builder.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, dacb.builders[i+1].mutation)
|
||||
} else {
|
||||
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||
spec.OnConflict = dacb.conflict
|
||||
// Invoke the actual operation on the latest mutation in the chain.
|
||||
if err = sqlgraph.BatchCreate(ctx, dacb.driver, spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &nodes[i].ID
|
||||
if specs[i].ID.Value != nil {
|
||||
id := specs[i].ID.Value.(int64)
|
||||
nodes[i].ID = int(id)
|
||||
}
|
||||
mutation.done = true
|
||||
return nodes[i], nil
|
||||
})
|
||||
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||
mut = builder.hooks[i](mut)
|
||||
}
|
||||
mutators[i] = mut
|
||||
}(i, ctx)
|
||||
}
|
||||
if len(mutators) > 0 {
|
||||
if _, err := mutators[0].Mutate(ctx, dacb.builders[0].mutation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (dacb *DavAccountCreateBulk) SaveX(ctx context.Context) []*DavAccount {
|
||||
v, err := dacb.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (dacb *DavAccountCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := dacb.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dacb *DavAccountCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := dacb.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||
// of the `INSERT` statement. For example:
|
||||
//
|
||||
// client.DavAccount.CreateBulk(builders...).
|
||||
// OnConflict(
|
||||
// // Update the row with the new values
|
||||
// // that were proposed for insertion.
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// // Override some of the fields with custom
|
||||
// // update values.
|
||||
// Update(func(u *ent.DavAccountUpsert) {
|
||||
// u.SetUpdatedAt(v)
|
||||
// }).
|
||||
// Exec(ctx)
|
||||
func (dacb *DavAccountCreateBulk) OnConflict(opts ...sql.ConflictOption) *DavAccountUpsertBulk {
|
||||
dacb.conflict = opts
|
||||
return &DavAccountUpsertBulk{
|
||||
create: dacb,
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.DavAccount.CreateBulk(builders...).
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (dacb *DavAccountCreateBulk) OnConflictColumns(columns ...string) *DavAccountUpsertBulk {
|
||||
dacb.conflict = append(dacb.conflict, sql.ConflictColumns(columns...))
|
||||
return &DavAccountUpsertBulk{
|
||||
create: dacb,
|
||||
}
|
||||
}
|
||||
|
||||
// DavAccountUpsertBulk is the builder for "upsert"-ing
|
||||
// a bulk of DavAccount nodes.
|
||||
type DavAccountUpsertBulk struct {
|
||||
create *DavAccountCreateBulk
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that
|
||||
// were set on create. Using this option is equivalent to using:
|
||||
//
|
||||
// client.DavAccount.CreateBulk(builders...).
|
||||
// OnConflict(
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// Exec(ctx)
|
||||
func (u *DavAccountUpsertBulk) UpdateNewValues() *DavAccountUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||
for _, b := range u.create.builders {
|
||||
if _, exists := b.mutation.CreatedAt(); exists {
|
||||
s.SetIgnore(davaccount.FieldCreatedAt)
|
||||
}
|
||||
}
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.DavAccount.CreateBulk(builders...).
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *DavAccountUpsertBulk) Ignore() *DavAccountUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *DavAccountUpsertBulk) DoNothing() *DavAccountUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding the fields' `UPDATE` values. See the DavAccountCreateBulk.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *DavAccountUpsertBulk) Update(set func(*DavAccountUpsert)) *DavAccountUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&DavAccountUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *DavAccountUpsertBulk) SetUpdatedAt(v time.Time) *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetUpdatedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertBulk) UpdateUpdatedAt() *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateUpdatedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (u *DavAccountUpsertBulk) SetDeletedAt(v time.Time) *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetDeletedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertBulk) UpdateDeletedAt() *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateDeletedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||
func (u *DavAccountUpsertBulk) ClearDeletedAt() *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.ClearDeletedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (u *DavAccountUpsertBulk) SetName(v string) *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetName(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateName sets the "name" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertBulk) UpdateName() *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateName()
|
||||
})
|
||||
}
|
||||
|
||||
// SetURI sets the "uri" field.
|
||||
func (u *DavAccountUpsertBulk) SetURI(v string) *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetURI(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateURI sets the "uri" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertBulk) UpdateURI() *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateURI()
|
||||
})
|
||||
}
|
||||
|
||||
// SetPassword sets the "password" field.
|
||||
func (u *DavAccountUpsertBulk) SetPassword(v string) *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetPassword(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdatePassword sets the "password" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertBulk) UpdatePassword() *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdatePassword()
|
||||
})
|
||||
}
|
||||
|
||||
// SetOptions sets the "options" field.
|
||||
func (u *DavAccountUpsertBulk) SetOptions(v *boolset.BooleanSet) *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetOptions(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateOptions sets the "options" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertBulk) UpdateOptions() *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateOptions()
|
||||
})
|
||||
}
|
||||
|
||||
// SetProps sets the "props" field.
|
||||
func (u *DavAccountUpsertBulk) SetProps(v *types.DavAccountProps) *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetProps(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateProps sets the "props" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertBulk) UpdateProps() *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateProps()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearProps clears the value of the "props" field.
|
||||
func (u *DavAccountUpsertBulk) ClearProps() *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.ClearProps()
|
||||
})
|
||||
}
|
||||
|
||||
// SetOwnerID sets the "owner_id" field.
|
||||
func (u *DavAccountUpsertBulk) SetOwnerID(v int) *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.SetOwnerID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateOwnerID sets the "owner_id" field to the value that was provided on create.
|
||||
func (u *DavAccountUpsertBulk) UpdateOwnerID() *DavAccountUpsertBulk {
|
||||
return u.Update(func(s *DavAccountUpsert) {
|
||||
s.UpdateOwnerID()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *DavAccountUpsertBulk) Exec(ctx context.Context) error {
|
||||
if u.create.err != nil {
|
||||
return u.create.err
|
||||
}
|
||||
for i, b := range u.create.builders {
|
||||
if len(b.conflict) != 0 {
|
||||
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the DavAccountCreateBulk instead", i)
|
||||
}
|
||||
}
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for DavAccountCreateBulk.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *DavAccountUpsertBulk) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
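The bulk builders above mirror the single-row upsert API. Below is a minimal, hypothetical sketch of bulk creation with conflict handling; it assumes an initialized *ent.Client named client, a context ctx, and the field setters on DavAccountCreate that are defined outside this hunk:

	builders := make([]*ent.DavAccountCreate, 0, 2)
	for _, name := range []string{"dav-1", "dav-2"} {
		builders = append(builders, client.DavAccount.Create().
			SetName(name).
			SetURI("cloudreve://my/"+name).
			SetPassword("secret").
			SetOwnerID(1))
	}
	// On a duplicate key, overwrite the row with the newly proposed values.
	err := client.DavAccount.CreateBulk(builders...).
		OnConflictColumns(davaccount.FieldID).
		UpdateNewValues().
		Exec(ctx)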
88
ent/davaccount_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
|
||||
)
|
||||
|
||||
// DavAccountDelete is the builder for deleting a DavAccount entity.
|
||||
type DavAccountDelete struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *DavAccountMutation
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the DavAccountDelete builder.
|
||||
func (dad *DavAccountDelete) Where(ps ...predicate.DavAccount) *DavAccountDelete {
|
||||
dad.mutation.Where(ps...)
|
||||
return dad
|
||||
}
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (dad *DavAccountDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, dad.sqlExec, dad.mutation, dad.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dad *DavAccountDelete) ExecX(ctx context.Context) int {
|
||||
n, err := dad.Exec(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (dad *DavAccountDelete) sqlExec(ctx context.Context) (int, error) {
|
||||
_spec := sqlgraph.NewDeleteSpec(davaccount.Table, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
|
||||
if ps := dad.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
affected, err := sqlgraph.DeleteNodes(ctx, dad.driver, _spec)
|
||||
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
dad.mutation.done = true
|
||||
return affected, err
|
||||
}
|
||||
|
||||
// DavAccountDeleteOne is the builder for deleting a single DavAccount entity.
|
||||
type DavAccountDeleteOne struct {
|
||||
dad *DavAccountDelete
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the DavAccountDelete builder.
|
||||
func (dado *DavAccountDeleteOne) Where(ps ...predicate.DavAccount) *DavAccountDeleteOne {
|
||||
dado.dad.mutation.Where(ps...)
|
||||
return dado
|
||||
}
|
||||
|
||||
// Exec executes the deletion query.
|
||||
func (dado *DavAccountDeleteOne) Exec(ctx context.Context) error {
|
||||
n, err := dado.dad.Exec(ctx)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case n == 0:
|
||||
return &NotFoundError{davaccount.Label}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dado *DavAccountDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := dado.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
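A hypothetical sketch of the delete builder above in use; it assumes client and ctx as before, plus the ent-generated predicate helpers (davaccount.OwnerID, davaccount.DeletedAtNotNil), which live in ent/davaccount and are not part of this hunk:

	// Remove all soft-deleted accounts of one owner; n is the number of rows deleted.
	n, err := client.DavAccount.Delete().
		Where(
			davaccount.OwnerID(1),
			davaccount.DeletedAtNotNil(),
		).
		Exec(ctx)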
605
ent/davaccount_query.go
Normal file
@@ -0,0 +1,605 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/user"
|
||||
)
|
||||
|
||||
// DavAccountQuery is the builder for querying DavAccount entities.
|
||||
type DavAccountQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []davaccount.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.DavAccount
|
||||
withOwner *UserQuery
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the DavAccountQuery builder.
|
||||
func (daq *DavAccountQuery) Where(ps ...predicate.DavAccount) *DavAccountQuery {
|
||||
daq.predicates = append(daq.predicates, ps...)
|
||||
return daq
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (daq *DavAccountQuery) Limit(limit int) *DavAccountQuery {
|
||||
daq.ctx.Limit = &limit
|
||||
return daq
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (daq *DavAccountQuery) Offset(offset int) *DavAccountQuery {
|
||||
daq.ctx.Offset = &offset
|
||||
return daq
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (daq *DavAccountQuery) Unique(unique bool) *DavAccountQuery {
|
||||
daq.ctx.Unique = &unique
|
||||
return daq
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (daq *DavAccountQuery) Order(o ...davaccount.OrderOption) *DavAccountQuery {
|
||||
daq.order = append(daq.order, o...)
|
||||
return daq
|
||||
}
|
||||
|
||||
// QueryOwner chains the current query on the "owner" edge.
|
||||
func (daq *DavAccountQuery) QueryOwner() *UserQuery {
|
||||
query := (&UserClient{config: daq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := daq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := daq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(davaccount.Table, davaccount.FieldID, selector),
|
||||
sqlgraph.To(user.Table, user.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, davaccount.OwnerTable, davaccount.OwnerColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(daq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first DavAccount entity from the query.
|
||||
// Returns a *NotFoundError when no DavAccount was found.
|
||||
func (daq *DavAccountQuery) First(ctx context.Context) (*DavAccount, error) {
|
||||
nodes, err := daq.Limit(1).All(setContextOp(ctx, daq.ctx, "First"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{davaccount.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (daq *DavAccountQuery) FirstX(ctx context.Context) *DavAccount {
|
||||
node, err := daq.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first DavAccount ID from the query.
|
||||
// Returns a *NotFoundError when no DavAccount ID was found.
|
||||
func (daq *DavAccountQuery) FirstID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = daq.Limit(1).IDs(setContextOp(ctx, daq.ctx, "FirstID")); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{davaccount.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (daq *DavAccountQuery) FirstIDX(ctx context.Context) int {
|
||||
id, err := daq.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single DavAccount entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one DavAccount entity is found.
|
||||
// Returns a *NotFoundError when no DavAccount entities are found.
|
||||
func (daq *DavAccountQuery) Only(ctx context.Context) (*DavAccount, error) {
|
||||
nodes, err := daq.Limit(2).All(setContextOp(ctx, daq.ctx, "Only"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{davaccount.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{davaccount.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (daq *DavAccountQuery) OnlyX(ctx context.Context) *DavAccount {
|
||||
node, err := daq.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only DavAccount ID in the query.
|
||||
// Returns a *NotSingularError when more than one DavAccount ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (daq *DavAccountQuery) OnlyID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = daq.Limit(2).IDs(setContextOp(ctx, daq.ctx, "OnlyID")); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{davaccount.Label}
|
||||
default:
|
||||
err = &NotSingularError{davaccount.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (daq *DavAccountQuery) OnlyIDX(ctx context.Context) int {
|
||||
id, err := daq.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of DavAccounts.
|
||||
func (daq *DavAccountQuery) All(ctx context.Context) ([]*DavAccount, error) {
|
||||
ctx = setContextOp(ctx, daq.ctx, "All")
|
||||
if err := daq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*DavAccount, *DavAccountQuery]()
|
||||
return withInterceptors[[]*DavAccount](ctx, daq, qr, daq.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (daq *DavAccountQuery) AllX(ctx context.Context) []*DavAccount {
|
||||
nodes, err := daq.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of DavAccount IDs.
|
||||
func (daq *DavAccountQuery) IDs(ctx context.Context) (ids []int, err error) {
|
||||
if daq.ctx.Unique == nil && daq.path != nil {
|
||||
daq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, daq.ctx, "IDs")
|
||||
if err = daq.Select(davaccount.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (daq *DavAccountQuery) IDsX(ctx context.Context) []int {
|
||||
ids, err := daq.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (daq *DavAccountQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, daq.ctx, "Count")
|
||||
if err := daq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, daq, querierCount[*DavAccountQuery](), daq.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (daq *DavAccountQuery) CountX(ctx context.Context) int {
|
||||
count, err := daq.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (daq *DavAccountQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, daq.ctx, "Exist")
|
||||
switch _, err := daq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (daq *DavAccountQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := daq.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the DavAccountQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (daq *DavAccountQuery) Clone() *DavAccountQuery {
|
||||
if daq == nil {
|
||||
return nil
|
||||
}
|
||||
return &DavAccountQuery{
|
||||
config: daq.config,
|
||||
ctx: daq.ctx.Clone(),
|
||||
order: append([]davaccount.OrderOption{}, daq.order...),
|
||||
inters: append([]Interceptor{}, daq.inters...),
|
||||
predicates: append([]predicate.DavAccount{}, daq.predicates...),
|
||||
withOwner: daq.withOwner.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: daq.sql.Clone(),
|
||||
path: daq.path,
|
||||
}
|
||||
}
|
||||
|
||||
// WithOwner tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "owner" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (daq *DavAccountQuery) WithOwner(opts ...func(*UserQuery)) *DavAccountQuery {
|
||||
query := (&UserClient{config: daq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
daq.withOwner = query
|
||||
return daq
|
||||
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.DavAccount.Query().
|
||||
// GroupBy(davaccount.FieldCreatedAt).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (daq *DavAccountQuery) GroupBy(field string, fields ...string) *DavAccountGroupBy {
|
||||
daq.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &DavAccountGroupBy{build: daq}
|
||||
grbuild.flds = &daq.ctx.Fields
|
||||
grbuild.label = davaccount.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection of one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.DavAccount.Query().
|
||||
// Select(davaccount.FieldCreatedAt).
|
||||
// Scan(ctx, &v)
|
||||
func (daq *DavAccountQuery) Select(fields ...string) *DavAccountSelect {
|
||||
daq.ctx.Fields = append(daq.ctx.Fields, fields...)
|
||||
sbuild := &DavAccountSelect{DavAccountQuery: daq}
|
||||
sbuild.label = davaccount.Label
|
||||
sbuild.flds, sbuild.scan = &daq.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a DavAccountSelect configured with the given aggregations.
|
||||
func (daq *DavAccountQuery) Aggregate(fns ...AggregateFunc) *DavAccountSelect {
|
||||
return daq.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
func (daq *DavAccountQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range daq.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
}
|
||||
if trv, ok := inter.(Traverser); ok {
|
||||
if err := trv.Traverse(ctx, daq); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, f := range daq.ctx.Fields {
|
||||
if !davaccount.ValidColumn(f) {
|
||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
}
|
||||
if daq.path != nil {
|
||||
prev, err := daq.path(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
daq.sql = prev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (daq *DavAccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DavAccount, error) {
|
||||
var (
|
||||
nodes = []*DavAccount{}
|
||||
_spec = daq.querySpec()
|
||||
loadedTypes = [1]bool{
|
||||
daq.withOwner != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*DavAccount).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &DavAccount{config: daq.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, daq.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := daq.withOwner; query != nil {
|
||||
if err := daq.loadOwner(ctx, query, nodes, nil,
|
||||
func(n *DavAccount, e *User) { n.Edges.Owner = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (daq *DavAccountQuery) loadOwner(ctx context.Context, query *UserQuery, nodes []*DavAccount, init func(*DavAccount), assign func(*DavAccount, *User)) error {
|
||||
ids := make([]int, 0, len(nodes))
|
||||
nodeids := make(map[int][]*DavAccount)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].OwnerID
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(user.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "owner_id" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (daq *DavAccountQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := daq.querySpec()
|
||||
_spec.Node.Columns = daq.ctx.Fields
|
||||
if len(daq.ctx.Fields) > 0 {
|
||||
_spec.Unique = daq.ctx.Unique != nil && *daq.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, daq.driver, _spec)
|
||||
}
|
||||
|
||||
func (daq *DavAccountQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(davaccount.Table, davaccount.Columns, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
|
||||
_spec.From = daq.sql
|
||||
if unique := daq.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if daq.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := daq.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, davaccount.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != davaccount.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
if daq.withOwner != nil {
|
||||
_spec.Node.AddColumnOnce(davaccount.FieldOwnerID)
|
||||
}
|
||||
}
|
||||
if ps := daq.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := daq.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := daq.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := daq.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (daq *DavAccountQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(daq.driver.Dialect())
|
||||
t1 := builder.Table(davaccount.Table)
|
||||
columns := daq.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = davaccount.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if daq.sql != nil {
|
||||
selector = daq.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if daq.ctx.Unique != nil && *daq.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, p := range daq.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range daq.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := daq.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for the offset clause. We start
|
||||
// with default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := daq.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// DavAccountGroupBy is the group-by builder for DavAccount entities.
|
||||
type DavAccountGroupBy struct {
|
||||
selector
|
||||
build *DavAccountQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (dagb *DavAccountGroupBy) Aggregate(fns ...AggregateFunc) *DavAccountGroupBy {
|
||||
dagb.fns = append(dagb.fns, fns...)
|
||||
return dagb
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (dagb *DavAccountGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, dagb.build.ctx, "GroupBy")
|
||||
if err := dagb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*DavAccountQuery, *DavAccountGroupBy](ctx, dagb.build, dagb, dagb.build.inters, v)
|
||||
}
|
||||
|
||||
func (dagb *DavAccountGroupBy) sqlScan(ctx context.Context, root *DavAccountQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(dagb.fns))
|
||||
for _, fn := range dagb.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(*dagb.flds)+len(dagb.fns))
|
||||
for _, f := range *dagb.flds {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
columns = append(columns, aggregation...)
|
||||
selector.Select(columns...)
|
||||
}
|
||||
selector.GroupBy(selector.Columns(*dagb.flds...)...)
|
||||
if err := selector.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := dagb.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// DavAccountSelect is the builder for selecting fields of DavAccount entities.
|
||||
type DavAccountSelect struct {
|
||||
*DavAccountQuery
|
||||
selector
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (das *DavAccountSelect) Aggregate(fns ...AggregateFunc) *DavAccountSelect {
|
||||
das.fns = append(das.fns, fns...)
|
||||
return das
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (das *DavAccountSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, das.ctx, "Select")
|
||||
if err := das.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*DavAccountQuery, *DavAccountSelect](ctx, das.DavAccountQuery, das, das.inters, v)
|
||||
}
|
||||
|
||||
func (das *DavAccountSelect) sqlScan(ctx context.Context, root *DavAccountQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(das.fns))
|
||||
for _, fn := range das.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
switch n := len(*das.selector.flds); {
|
||||
case n == 0 && len(aggregation) > 0:
|
||||
selector.Select(aggregation...)
|
||||
case n != 0 && len(aggregation) > 0:
|
||||
selector.AppendSelect(aggregation...)
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := das.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
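A hypothetical sketch for the query builder above, with the owner edge eager-loaded; same assumptions about client, ctx, and the ent-generated predicate helpers outside this hunk:

	// Page through one owner's accounts and load the owner edge in the same round trip.
	accounts, err := client.DavAccount.Query().
		Where(davaccount.OwnerID(1)).
		WithOwner().
		Limit(20).
		Offset(0).
		All(ctx)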
565
ent/davaccount_update.go
Normal file
@@ -0,0 +1,565 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/user"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
)
|
||||
|
||||
// DavAccountUpdate is the builder for updating DavAccount entities.
|
||||
type DavAccountUpdate struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *DavAccountMutation
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the DavAccountUpdate builder.
|
||||
func (dau *DavAccountUpdate) Where(ps ...predicate.DavAccount) *DavAccountUpdate {
|
||||
dau.mutation.Where(ps...)
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (dau *DavAccountUpdate) SetUpdatedAt(t time.Time) *DavAccountUpdate {
|
||||
dau.mutation.SetUpdatedAt(t)
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (dau *DavAccountUpdate) SetDeletedAt(t time.Time) *DavAccountUpdate {
|
||||
dau.mutation.SetDeletedAt(t)
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
|
||||
func (dau *DavAccountUpdate) SetNillableDeletedAt(t *time.Time) *DavAccountUpdate {
|
||||
if t != nil {
|
||||
dau.SetDeletedAt(*t)
|
||||
}
|
||||
return dau
|
||||
}
|
||||
|
||||
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||
func (dau *DavAccountUpdate) ClearDeletedAt() *DavAccountUpdate {
|
||||
dau.mutation.ClearDeletedAt()
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (dau *DavAccountUpdate) SetName(s string) *DavAccountUpdate {
|
||||
dau.mutation.SetName(s)
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetNillableName sets the "name" field if the given value is not nil.
|
||||
func (dau *DavAccountUpdate) SetNillableName(s *string) *DavAccountUpdate {
|
||||
if s != nil {
|
||||
dau.SetName(*s)
|
||||
}
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetURI sets the "uri" field.
|
||||
func (dau *DavAccountUpdate) SetURI(s string) *DavAccountUpdate {
|
||||
dau.mutation.SetURI(s)
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetNillableURI sets the "uri" field if the given value is not nil.
|
||||
func (dau *DavAccountUpdate) SetNillableURI(s *string) *DavAccountUpdate {
|
||||
if s != nil {
|
||||
dau.SetURI(*s)
|
||||
}
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetPassword sets the "password" field.
|
||||
func (dau *DavAccountUpdate) SetPassword(s string) *DavAccountUpdate {
|
||||
dau.mutation.SetPassword(s)
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetNillablePassword sets the "password" field if the given value is not nil.
|
||||
func (dau *DavAccountUpdate) SetNillablePassword(s *string) *DavAccountUpdate {
|
||||
if s != nil {
|
||||
dau.SetPassword(*s)
|
||||
}
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetOptions sets the "options" field.
|
||||
func (dau *DavAccountUpdate) SetOptions(bs *boolset.BooleanSet) *DavAccountUpdate {
|
||||
dau.mutation.SetOptions(bs)
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetProps sets the "props" field.
|
||||
func (dau *DavAccountUpdate) SetProps(tap *types.DavAccountProps) *DavAccountUpdate {
|
||||
dau.mutation.SetProps(tap)
|
||||
return dau
|
||||
}
|
||||
|
||||
// ClearProps clears the value of the "props" field.
|
||||
func (dau *DavAccountUpdate) ClearProps() *DavAccountUpdate {
|
||||
dau.mutation.ClearProps()
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetOwnerID sets the "owner_id" field.
|
||||
func (dau *DavAccountUpdate) SetOwnerID(i int) *DavAccountUpdate {
|
||||
dau.mutation.SetOwnerID(i)
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetNillableOwnerID sets the "owner_id" field if the given value is not nil.
|
||||
func (dau *DavAccountUpdate) SetNillableOwnerID(i *int) *DavAccountUpdate {
|
||||
if i != nil {
|
||||
dau.SetOwnerID(*i)
|
||||
}
|
||||
return dau
|
||||
}
|
||||
|
||||
// SetOwner sets the "owner" edge to the User entity.
|
||||
func (dau *DavAccountUpdate) SetOwner(u *User) *DavAccountUpdate {
|
||||
return dau.SetOwnerID(u.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the DavAccountMutation object of the builder.
|
||||
func (dau *DavAccountUpdate) Mutation() *DavAccountMutation {
|
||||
return dau.mutation
|
||||
}
|
||||
|
||||
// ClearOwner clears the "owner" edge to the User entity.
|
||||
func (dau *DavAccountUpdate) ClearOwner() *DavAccountUpdate {
|
||||
dau.mutation.ClearOwner()
|
||||
return dau
|
||||
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (dau *DavAccountUpdate) Save(ctx context.Context) (int, error) {
|
||||
if err := dau.defaults(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withHooks(ctx, dau.sqlSave, dau.mutation, dau.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (dau *DavAccountUpdate) SaveX(ctx context.Context) int {
|
||||
affected, err := dau.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return affected
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (dau *DavAccountUpdate) Exec(ctx context.Context) error {
|
||||
_, err := dau.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dau *DavAccountUpdate) ExecX(ctx context.Context) {
|
||||
if err := dau.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
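// Illustrative usage sketch (not generated code): a hypothetical update through the
// builder above, assuming "client", "ctx", and the ent-generated predicate helpers
// that are defined outside this hunk:
//
//	n, err := client.DavAccount.Update().
//		Where(davaccount.OwnerID(1)).
//		SetPassword("rotated-secret").
//		Save(ctx) // n = number of rows updated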
// defaults sets the default values of the builder before save.
|
||||
func (dau *DavAccountUpdate) defaults() error {
|
||||
if _, ok := dau.mutation.UpdatedAt(); !ok {
|
||||
if davaccount.UpdateDefaultUpdatedAt == nil {
|
||||
return fmt.Errorf("ent: uninitialized davaccount.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
|
||||
}
|
||||
v := davaccount.UpdateDefaultUpdatedAt()
|
||||
dau.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (dau *DavAccountUpdate) check() error {
|
||||
if _, ok := dau.mutation.OwnerID(); dau.mutation.OwnerCleared() && !ok {
|
||||
return errors.New(`ent: clearing a required unique edge "DavAccount.owner"`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dau *DavAccountUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
if err := dau.check(); err != nil {
|
||||
return n, err
|
||||
}
|
||||
_spec := sqlgraph.NewUpdateSpec(davaccount.Table, davaccount.Columns, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
|
||||
if ps := dau.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := dau.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(davaccount.FieldUpdatedAt, field.TypeTime, value)
|
||||
}
|
||||
if value, ok := dau.mutation.DeletedAt(); ok {
|
||||
_spec.SetField(davaccount.FieldDeletedAt, field.TypeTime, value)
|
||||
}
|
||||
if dau.mutation.DeletedAtCleared() {
|
||||
_spec.ClearField(davaccount.FieldDeletedAt, field.TypeTime)
|
||||
}
|
||||
if value, ok := dau.mutation.Name(); ok {
|
||||
_spec.SetField(davaccount.FieldName, field.TypeString, value)
|
||||
}
|
||||
if value, ok := dau.mutation.URI(); ok {
|
||||
_spec.SetField(davaccount.FieldURI, field.TypeString, value)
|
||||
}
|
||||
if value, ok := dau.mutation.Password(); ok {
|
||||
_spec.SetField(davaccount.FieldPassword, field.TypeString, value)
|
||||
}
|
||||
if value, ok := dau.mutation.Options(); ok {
|
||||
_spec.SetField(davaccount.FieldOptions, field.TypeBytes, value)
|
||||
}
|
||||
if value, ok := dau.mutation.Props(); ok {
|
||||
_spec.SetField(davaccount.FieldProps, field.TypeJSON, value)
|
||||
}
|
||||
if dau.mutation.PropsCleared() {
|
||||
_spec.ClearField(davaccount.FieldProps, field.TypeJSON)
|
||||
}
|
||||
if dau.mutation.OwnerCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: davaccount.OwnerTable,
|
||||
Columns: []string{davaccount.OwnerColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := dau.mutation.OwnerIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: davaccount.OwnerTable,
|
||||
Columns: []string{davaccount.OwnerColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if n, err = sqlgraph.UpdateNodes(ctx, dau.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{davaccount.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
dau.mutation.done = true
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// DavAccountUpdateOne is the builder for updating a single DavAccount entity.
|
||||
type DavAccountUpdateOne struct {
|
||||
config
|
||||
fields []string
|
||||
hooks []Hook
|
||||
mutation *DavAccountMutation
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (dauo *DavAccountUpdateOne) SetUpdatedAt(t time.Time) *DavAccountUpdateOne {
|
||||
dauo.mutation.SetUpdatedAt(t)
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (dauo *DavAccountUpdateOne) SetDeletedAt(t time.Time) *DavAccountUpdateOne {
|
||||
dauo.mutation.SetDeletedAt(t)
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
|
||||
func (dauo *DavAccountUpdateOne) SetNillableDeletedAt(t *time.Time) *DavAccountUpdateOne {
|
||||
if t != nil {
|
||||
dauo.SetDeletedAt(*t)
|
||||
}
|
||||
return dauo
|
||||
}
|
||||
|
||||
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||
func (dauo *DavAccountUpdateOne) ClearDeletedAt() *DavAccountUpdateOne {
|
||||
dauo.mutation.ClearDeletedAt()
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (dauo *DavAccountUpdateOne) SetName(s string) *DavAccountUpdateOne {
|
||||
dauo.mutation.SetName(s)
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetNillableName sets the "name" field if the given value is not nil.
|
||||
func (dauo *DavAccountUpdateOne) SetNillableName(s *string) *DavAccountUpdateOne {
|
||||
if s != nil {
|
||||
dauo.SetName(*s)
|
||||
}
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetURI sets the "uri" field.
|
||||
func (dauo *DavAccountUpdateOne) SetURI(s string) *DavAccountUpdateOne {
|
||||
dauo.mutation.SetURI(s)
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetNillableURI sets the "uri" field if the given value is not nil.
|
||||
func (dauo *DavAccountUpdateOne) SetNillableURI(s *string) *DavAccountUpdateOne {
|
||||
if s != nil {
|
||||
dauo.SetURI(*s)
|
||||
}
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetPassword sets the "password" field.
|
||||
func (dauo *DavAccountUpdateOne) SetPassword(s string) *DavAccountUpdateOne {
|
||||
dauo.mutation.SetPassword(s)
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetNillablePassword sets the "password" field if the given value is not nil.
|
||||
func (dauo *DavAccountUpdateOne) SetNillablePassword(s *string) *DavAccountUpdateOne {
|
||||
if s != nil {
|
||||
dauo.SetPassword(*s)
|
||||
}
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetOptions sets the "options" field.
|
||||
func (dauo *DavAccountUpdateOne) SetOptions(bs *boolset.BooleanSet) *DavAccountUpdateOne {
|
||||
dauo.mutation.SetOptions(bs)
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetProps sets the "props" field.
|
||||
func (dauo *DavAccountUpdateOne) SetProps(tap *types.DavAccountProps) *DavAccountUpdateOne {
|
||||
dauo.mutation.SetProps(tap)
|
||||
return dauo
|
||||
}
|
||||
|
||||
// ClearProps clears the value of the "props" field.
|
||||
func (dauo *DavAccountUpdateOne) ClearProps() *DavAccountUpdateOne {
|
||||
dauo.mutation.ClearProps()
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetOwnerID sets the "owner_id" field.
|
||||
func (dauo *DavAccountUpdateOne) SetOwnerID(i int) *DavAccountUpdateOne {
|
||||
dauo.mutation.SetOwnerID(i)
|
||||
return dauo
|
||||
}
|
||||
|
||||
// SetNillableOwnerID sets the "owner_id" field if the given value is not nil.
|
||||
func (dauo *DavAccountUpdateOne) SetNillableOwnerID(i *int) *DavAccountUpdateOne {
|
||||
if i != nil {
|
||||
dauo.SetOwnerID(*i)
|
||||
}
|
||||
return dauo
|
||||
}
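The SetNillable* setters above make PATCH-style updates convenient: fields left nil in a request are simply not written. A sketch under the assumption that the client exposes the usual UpdateOneID entry point; the request struct is hypothetical.

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
)

// UpdateAccountRequest carries optional fields; nil means "leave unchanged".
type UpdateAccountRequest struct {
	Name *string
	URI  *string
}

func patchAccount(ctx context.Context, client *ent.Client, id int, req UpdateAccountRequest) (*ent.DavAccount, error) {
	return client.DavAccount.UpdateOneID(id).
		SetNillableName(req.Name).
		SetNillableURI(req.URI).
		Save(ctx)
}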
|
||||
|
||||
// SetOwner sets the "owner" edge to the User entity.
|
||||
func (dauo *DavAccountUpdateOne) SetOwner(u *User) *DavAccountUpdateOne {
|
||||
return dauo.SetOwnerID(u.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the DavAccountMutation object of the builder.
|
||||
func (dauo *DavAccountUpdateOne) Mutation() *DavAccountMutation {
|
||||
return dauo.mutation
|
||||
}
|
||||
|
||||
// ClearOwner clears the "owner" edge to the User entity.
|
||||
func (dauo *DavAccountUpdateOne) ClearOwner() *DavAccountUpdateOne {
|
||||
dauo.mutation.ClearOwner()
|
||||
return dauo
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the DavAccountUpdateOne builder.
|
||||
func (dauo *DavAccountUpdateOne) Where(ps ...predicate.DavAccount) *DavAccountUpdateOne {
|
||||
dauo.mutation.Where(ps...)
|
||||
return dauo
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (dauo *DavAccountUpdateOne) Select(field string, fields ...string) *DavAccountUpdateOne {
|
||||
dauo.fields = append([]string{field}, fields...)
|
||||
return dauo
|
||||
}
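Select narrows which columns of the updated row are scanned back into the returned entity. A sketch assuming the generated davaccount.FieldName and davaccount.FieldUpdatedAt constants; the id and name are hypothetical.

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
)

// renameAndReadBack updates one account but only reads name and updated_at
// back into the returned entity; unselected fields keep their zero values.
func renameAndReadBack(ctx context.Context, client *ent.Client, id int) (*ent.DavAccount, error) {
	return client.DavAccount.UpdateOneID(id).
		SetName("home").
		Select(davaccount.FieldName, davaccount.FieldUpdatedAt).
		Save(ctx)
}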
|
||||
|
||||
// Save executes the query and returns the updated DavAccount entity.
|
||||
func (dauo *DavAccountUpdateOne) Save(ctx context.Context) (*DavAccount, error) {
|
||||
if err := dauo.defaults(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return withHooks(ctx, dauo.sqlSave, dauo.mutation, dauo.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (dauo *DavAccountUpdateOne) SaveX(ctx context.Context) *DavAccount {
|
||||
node, err := dauo.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// Exec executes the query on the entity.
|
||||
func (dauo *DavAccountUpdateOne) Exec(ctx context.Context) error {
|
||||
_, err := dauo.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dauo *DavAccountUpdateOne) ExecX(ctx context.Context) {
|
||||
if err := dauo.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (dauo *DavAccountUpdateOne) defaults() error {
|
||||
if _, ok := dauo.mutation.UpdatedAt(); !ok {
|
||||
if davaccount.UpdateDefaultUpdatedAt == nil {
|
||||
return fmt.Errorf("ent: uninitialized davaccount.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
|
||||
}
|
||||
v := davaccount.UpdateDefaultUpdatedAt()
|
||||
dauo.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (dauo *DavAccountUpdateOne) check() error {
|
||||
if _, ok := dauo.mutation.OwnerID(); dauo.mutation.OwnerCleared() && !ok {
|
||||
return errors.New(`ent: clearing a required unique edge "DavAccount.owner"`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dauo *DavAccountUpdateOne) sqlSave(ctx context.Context) (_node *DavAccount, err error) {
|
||||
if err := dauo.check(); err != nil {
|
||||
return _node, err
|
||||
}
|
||||
_spec := sqlgraph.NewUpdateSpec(davaccount.Table, davaccount.Columns, sqlgraph.NewFieldSpec(davaccount.FieldID, field.TypeInt))
|
||||
id, ok := dauo.mutation.ID()
|
||||
if !ok {
|
||||
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DavAccount.id" for update`)}
|
||||
}
|
||||
_spec.Node.ID.Value = id
|
||||
if fields := dauo.fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, davaccount.FieldID)
|
||||
for _, f := range fields {
|
||||
if !davaccount.ValidColumn(f) {
|
||||
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
if f != davaccount.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if ps := dauo.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := dauo.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(davaccount.FieldUpdatedAt, field.TypeTime, value)
|
||||
}
|
||||
if value, ok := dauo.mutation.DeletedAt(); ok {
|
||||
_spec.SetField(davaccount.FieldDeletedAt, field.TypeTime, value)
|
||||
}
|
||||
if dauo.mutation.DeletedAtCleared() {
|
||||
_spec.ClearField(davaccount.FieldDeletedAt, field.TypeTime)
|
||||
}
|
||||
if value, ok := dauo.mutation.Name(); ok {
|
||||
_spec.SetField(davaccount.FieldName, field.TypeString, value)
|
||||
}
|
||||
if value, ok := dauo.mutation.URI(); ok {
|
||||
_spec.SetField(davaccount.FieldURI, field.TypeString, value)
|
||||
}
|
||||
if value, ok := dauo.mutation.Password(); ok {
|
||||
_spec.SetField(davaccount.FieldPassword, field.TypeString, value)
|
||||
}
|
||||
if value, ok := dauo.mutation.Options(); ok {
|
||||
_spec.SetField(davaccount.FieldOptions, field.TypeBytes, value)
|
||||
}
|
||||
if value, ok := dauo.mutation.Props(); ok {
|
||||
_spec.SetField(davaccount.FieldProps, field.TypeJSON, value)
|
||||
}
|
||||
if dauo.mutation.PropsCleared() {
|
||||
_spec.ClearField(davaccount.FieldProps, field.TypeJSON)
|
||||
}
|
||||
if dauo.mutation.OwnerCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: davaccount.OwnerTable,
|
||||
Columns: []string{davaccount.OwnerColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := dauo.mutation.OwnerIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: davaccount.OwnerTable,
|
||||
Columns: []string{davaccount.OwnerColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
_node = &DavAccount{config: dauo.config}
|
||||
_spec.Assign = _node.assignValues
|
||||
_spec.ScanValues = _node.scanValues
|
||||
if err = sqlgraph.UpdateNode(ctx, dauo.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{davaccount.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
dauo.mutation.done = true
|
||||
return _node, nil
|
||||
}
ent/directlink.go (new file, 212 lines)
@@ -0,0 +1,212 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"fmt"
	"strings"
	"time"

	"entgo.io/ent"
	"entgo.io/ent/dialect/sql"
	"github.com/cloudreve/Cloudreve/v4/ent/directlink"
	"github.com/cloudreve/Cloudreve/v4/ent/file"
)
|
||||
|
||||
// DirectLink is the model entity for the DirectLink schema.
|
||||
type DirectLink struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int `json:"id,omitempty"`
|
||||
// CreatedAt holds the value of the "created_at" field.
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// UpdatedAt holds the value of the "updated_at" field.
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
// DeletedAt holds the value of the "deleted_at" field.
|
||||
DeletedAt *time.Time `json:"deleted_at,omitempty"`
|
||||
// Name holds the value of the "name" field.
|
||||
Name string `json:"name,omitempty"`
|
||||
// Downloads holds the value of the "downloads" field.
|
||||
Downloads int `json:"downloads,omitempty"`
|
||||
// FileID holds the value of the "file_id" field.
|
||||
FileID int `json:"file_id,omitempty"`
|
||||
// Speed holds the value of the "speed" field.
|
||||
Speed int `json:"speed,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the DirectLinkQuery when eager-loading is set.
|
||||
Edges DirectLinkEdges `json:"edges"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// DirectLinkEdges holds the relations/edges for other nodes in the graph.
|
||||
type DirectLinkEdges struct {
|
||||
// File holds the value of the file edge.
|
||||
File *File `json:"file,omitempty"`
|
||||
// loadedTypes holds the information for reporting if a
|
||||
// type was loaded (or requested) in eager-loading or not.
|
||||
loadedTypes [1]bool
|
||||
}
|
||||
|
||||
// FileOrErr returns the File value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e DirectLinkEdges) FileOrErr() (*File, error) {
|
||||
if e.loadedTypes[0] {
|
||||
if e.File == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: file.Label}
|
||||
}
|
||||
return e.File, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "file"}
|
||||
}
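FileOrErr is the accessor used after eager-loading the edge. A sketch assuming the generated DirectLinkQuery exposes WithFile, the usual ent pattern for a single edge; the client handle is hypothetical.

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
)

func listWithFiles(ctx context.Context, client *ent.Client) error {
	links, err := client.DirectLink.Query().WithFile().All(ctx)
	if err != nil {
		return err
	}
	for _, dl := range links {
		// The edge was loaded above, so this is either the file or NotFoundError.
		f, err := dl.Edges.FileOrErr()
		if err != nil {
			return err
		}
		_ = f
	}
	return nil
}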
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*DirectLink) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case directlink.FieldID, directlink.FieldDownloads, directlink.FieldFileID, directlink.FieldSpeed:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case directlink.FieldName:
|
||||
values[i] = new(sql.NullString)
|
||||
case directlink.FieldCreatedAt, directlink.FieldUpdatedAt, directlink.FieldDeletedAt:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the DirectLink fields.
|
||||
func (dl *DirectLink) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case directlink.FieldID:
|
||||
value, ok := values[i].(*sql.NullInt64)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected type %T for field id", value)
|
||||
}
|
||||
dl.ID = int(value.Int64)
|
||||
case directlink.FieldCreatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||
} else if value.Valid {
|
||||
dl.CreatedAt = value.Time
|
||||
}
|
||||
case directlink.FieldUpdatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||
} else if value.Valid {
|
||||
dl.UpdatedAt = value.Time
|
||||
}
|
||||
case directlink.FieldDeletedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
|
||||
} else if value.Valid {
|
||||
dl.DeletedAt = new(time.Time)
|
||||
*dl.DeletedAt = value.Time
|
||||
}
|
||||
case directlink.FieldName:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field name", values[i])
|
||||
} else if value.Valid {
|
||||
dl.Name = value.String
|
||||
}
|
||||
case directlink.FieldDownloads:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field downloads", values[i])
|
||||
} else if value.Valid {
|
||||
dl.Downloads = int(value.Int64)
|
||||
}
|
||||
case directlink.FieldFileID:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field file_id", values[i])
|
||||
} else if value.Valid {
|
||||
dl.FileID = int(value.Int64)
|
||||
}
|
||||
case directlink.FieldSpeed:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field speed", values[i])
|
||||
} else if value.Valid {
|
||||
dl.Speed = int(value.Int64)
|
||||
}
|
||||
default:
|
||||
dl.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the DirectLink.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (dl *DirectLink) Value(name string) (ent.Value, error) {
|
||||
return dl.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryFile queries the "file" edge of the DirectLink entity.
|
||||
func (dl *DirectLink) QueryFile() *FileQuery {
|
||||
return NewDirectLinkClient(dl.config).QueryFile(dl)
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this DirectLink.
|
||||
// Note that you need to call DirectLink.Unwrap() before calling this method if this DirectLink
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (dl *DirectLink) Update() *DirectLinkUpdateOne {
|
||||
return NewDirectLinkClient(dl.config).UpdateOne(dl)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the DirectLink entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (dl *DirectLink) Unwrap() *DirectLink {
|
||||
_tx, ok := dl.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: DirectLink is not a transactional entity")
|
||||
}
|
||||
dl.config.driver = _tx.drv
|
||||
return dl
|
||||
}
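Unwrap matters when an entity created inside a transaction outlives it. A sketch only; the create arguments are hypothetical and error handling is abbreviated.

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
)

func createInTx(ctx context.Context, client *ent.Client, fileID int) (*ent.DirectLink, error) {
	tx, err := client.Tx(ctx)
	if err != nil {
		return nil, err
	}
	dl, err := tx.DirectLink.Create().
		SetName("report.pdf").
		SetFileID(fileID).
		SetDownloads(0).
		SetSpeed(0).
		Save(ctx)
	if err != nil {
		_ = tx.Rollback()
		return nil, err
	}
	if err := tx.Commit(); err != nil {
		return nil, err
	}
	// The entity still points at the committed tx driver; Unwrap switches it
	// back to the root driver so later calls (e.g. dl.QueryFile()) keep working.
	return dl.Unwrap(), nil
}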
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (dl *DirectLink) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("DirectLink(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", dl.ID))
|
||||
builder.WriteString("created_at=")
|
||||
builder.WriteString(dl.CreatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("updated_at=")
|
||||
builder.WriteString(dl.UpdatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
if v := dl.DeletedAt; v != nil {
|
||||
builder.WriteString("deleted_at=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("name=")
|
||||
builder.WriteString(dl.Name)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("downloads=")
|
||||
builder.WriteString(fmt.Sprintf("%v", dl.Downloads))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("file_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", dl.FileID))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("speed=")
|
||||
builder.WriteString(fmt.Sprintf("%v", dl.Speed))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// SetFile manually set the edge as loaded state.
|
||||
func (e *DirectLink) SetFile(v *File) {
|
||||
e.Edges.File = v
|
||||
e.Edges.loadedTypes[0] = true
|
||||
}
|
||||
|
||||
// DirectLinks is a parsable slice of DirectLink.
|
||||
type DirectLinks []*DirectLink
ent/directlink/directlink.go (new file, 138 lines)
@@ -0,0 +1,138 @@
// Code generated by ent, DO NOT EDIT.

package directlink

import (
	"time"

	"entgo.io/ent"
	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
)
|
||||
|
||||
const (
|
||||
// Label holds the string label denoting the directlink type in the database.
|
||||
Label = "direct_link"
|
||||
// FieldID holds the string denoting the id field in the database.
|
||||
FieldID = "id"
|
||||
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||
FieldCreatedAt = "created_at"
|
||||
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||
FieldUpdatedAt = "updated_at"
|
||||
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
|
||||
FieldDeletedAt = "deleted_at"
|
||||
// FieldName holds the string denoting the name field in the database.
|
||||
FieldName = "name"
|
||||
// FieldDownloads holds the string denoting the downloads field in the database.
|
||||
FieldDownloads = "downloads"
|
||||
// FieldFileID holds the string denoting the file_id field in the database.
|
||||
FieldFileID = "file_id"
|
||||
// FieldSpeed holds the string denoting the speed field in the database.
|
||||
FieldSpeed = "speed"
|
||||
// EdgeFile holds the string denoting the file edge name in mutations.
|
||||
EdgeFile = "file"
|
||||
// Table holds the table name of the directlink in the database.
|
||||
Table = "direct_links"
|
||||
// FileTable is the table that holds the file relation/edge.
|
||||
FileTable = "direct_links"
|
||||
// FileInverseTable is the table name for the File entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "file" package.
|
||||
FileInverseTable = "files"
|
||||
// FileColumn is the table column denoting the file relation/edge.
|
||||
FileColumn = "file_id"
|
||||
)
|
||||
|
||||
// Columns holds all SQL columns for directlink fields.
|
||||
var Columns = []string{
|
||||
FieldID,
|
||||
FieldCreatedAt,
|
||||
FieldUpdatedAt,
|
||||
FieldDeletedAt,
|
||||
FieldName,
|
||||
FieldDownloads,
|
||||
FieldFileID,
|
||||
FieldSpeed,
|
||||
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Note that the variables below are initialized by the runtime
|
||||
// package on the initialization of the application. Therefore,
|
||||
// it should be imported in the main as follows:
|
||||
//
|
||||
// import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
|
||||
var (
|
||||
Hooks [1]ent.Hook
|
||||
Interceptors [1]ent.Interceptor
|
||||
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||
DefaultCreatedAt func() time.Time
|
||||
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||
DefaultUpdatedAt func() time.Time
|
||||
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||
UpdateDefaultUpdatedAt func() time.Time
|
||||
)
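These hooks and default-value functions are populated by the ent/runtime package; the comment above means an application needs a blank import such as the following (the error text in the comment matches the defaults() checks later in this diff).

package main

import (
	// Initializes Hooks, Interceptors and the Default*/UpdateDefault* functions
	// declared above; without it Save() fails with errors like
	// "ent: uninitialized directlink.DefaultCreatedAt (forgotten import ent/runtime?)".
	_ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
)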
|
||||
|
||||
// OrderOption defines the ordering options for the DirectLink queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedAt orders the results by the created_at field.
|
||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUpdatedAt orders the results by the updated_at field.
|
||||
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByDeletedAt orders the results by the deleted_at field.
|
||||
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByName orders the results by the name field.
|
||||
func ByName(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldName, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByDownloads orders the results by the downloads field.
|
||||
func ByDownloads(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldDownloads, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByFileID orders the results by the file_id field.
|
||||
func ByFileID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldFileID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySpeed orders the results by the speed field.
|
||||
func BySpeed(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSpeed, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByFileField orders the results by file field.
|
||||
func ByFileField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newFileStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
func newFileStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(FileInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, FileTable, FileColumn),
|
||||
)
|
||||
}
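The OrderOption helpers plug into Query().Order. A sketch assuming the generated DirectLinkQuery and a file.FieldName constant in the file package; the client handle is hypothetical.

package example

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/directlink"
	"github.com/cloudreve/Cloudreve/v4/ent/file"
)

func hottestLinks(ctx context.Context, client *ent.Client) ([]*ent.DirectLink, error) {
	return client.DirectLink.Query().
		Order(
			directlink.ByDownloads(sql.OrderDesc()), // most downloaded first
			directlink.ByFileField(file.FieldName),  // tie-break by the joined file's name
		).
		All(ctx)
}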
ent/directlink/where.go (new file, 424 lines)
@@ -0,0 +1,424 @@
// Code generated by ent, DO NOT EDIT.

package directlink

import (
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)
|
||||
|
||||
// ID filters vertices based on their ID field.
|
||||
func ID(id int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDEQ applies the EQ predicate on the ID field.
|
||||
func IDEQ(id int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDNEQ applies the NEQ predicate on the ID field.
|
||||
func IDNEQ(id int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDIn applies the In predicate on the ID field.
|
||||
func IDIn(ids ...int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDNotIn applies the NotIn predicate on the ID field.
|
||||
func IDNotIn(ids ...int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNotIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDGT applies the GT predicate on the ID field.
|
||||
func IDGT(id int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDGTE applies the GTE predicate on the ID field.
|
||||
func IDGTE(id int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGTE(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLT applies the LT predicate on the ID field.
|
||||
func IDLT(id int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLTE applies the LTE predicate on the ID field.
|
||||
func IDLTE(id int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLTE(FieldID, id))
|
||||
}
|
||||
|
||||
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||
func CreatedAt(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||
func UpdatedAt(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
|
||||
func DeletedAt(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
|
||||
func Name(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// Downloads applies equality check predicate on the "downloads" field. It's identical to DownloadsEQ.
|
||||
func Downloads(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldDownloads, v))
|
||||
}
|
||||
|
||||
// FileID applies equality check predicate on the "file_id" field. It's identical to FileIDEQ.
|
||||
func FileID(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldFileID, v))
|
||||
}
|
||||
|
||||
// Speed applies equality check predicate on the "speed" field. It's identical to SpeedEQ.
|
||||
func Speed(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldSpeed, v))
|
||||
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||
func CreatedAtEQ(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||
func CreatedAtNEQ(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||
func CreatedAtIn(vs ...time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||
func CreatedAtNotIn(vs ...time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||
func CreatedAtGT(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||
func CreatedAtGTE(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||
func CreatedAtLT(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||
func CreatedAtLTE(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||
func UpdatedAtEQ(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||
func UpdatedAtNEQ(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||
func UpdatedAtIn(vs ...time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||
func UpdatedAtNotIn(vs ...time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||
func UpdatedAtGT(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||
func UpdatedAtGTE(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||
func UpdatedAtLT(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||
func UpdatedAtLTE(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
|
||||
func DeletedAtEQ(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
|
||||
func DeletedAtNEQ(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNEQ(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtIn applies the In predicate on the "deleted_at" field.
|
||||
func DeletedAtIn(vs ...time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldIn(FieldDeletedAt, vs...))
|
||||
}
|
||||
|
||||
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
|
||||
func DeletedAtNotIn(vs ...time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNotIn(FieldDeletedAt, vs...))
|
||||
}
|
||||
|
||||
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
|
||||
func DeletedAtGT(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGT(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
|
||||
func DeletedAtGTE(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGTE(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
|
||||
func DeletedAtLT(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLT(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
|
||||
func DeletedAtLTE(v time.Time) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLTE(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
|
||||
func DeletedAtIsNil() predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldIsNull(FieldDeletedAt))
|
||||
}
|
||||
|
||||
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
|
||||
func DeletedAtNotNil() predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNotNull(FieldDeletedAt))
|
||||
}
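DeletedAtIsNil is the predicate a soft-delete-aware query would use to skip trashed rows. A minimal sketch; the client handle is hypothetical.

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/directlink"
)

// activeLinks returns only direct links whose deleted_at column is NULL.
func activeLinks(ctx context.Context, client *ent.Client) ([]*ent.DirectLink, error) {
	return client.DirectLink.Query().
		Where(directlink.DeletedAtIsNil()).
		All(ctx)
}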
|
||||
|
||||
// NameEQ applies the EQ predicate on the "name" field.
|
||||
func NameEQ(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// NameNEQ applies the NEQ predicate on the "name" field.
|
||||
func NameNEQ(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// NameIn applies the In predicate on the "name" field.
|
||||
func NameIn(vs ...string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldIn(FieldName, vs...))
|
||||
}
|
||||
|
||||
// NameNotIn applies the NotIn predicate on the "name" field.
|
||||
func NameNotIn(vs ...string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNotIn(FieldName, vs...))
|
||||
}
|
||||
|
||||
// NameGT applies the GT predicate on the "name" field.
|
||||
func NameGT(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGT(FieldName, v))
|
||||
}
|
||||
|
||||
// NameGTE applies the GTE predicate on the "name" field.
|
||||
func NameGTE(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGTE(FieldName, v))
|
||||
}
|
||||
|
||||
// NameLT applies the LT predicate on the "name" field.
|
||||
func NameLT(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLT(FieldName, v))
|
||||
}
|
||||
|
||||
// NameLTE applies the LTE predicate on the "name" field.
|
||||
func NameLTE(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLTE(FieldName, v))
|
||||
}
|
||||
|
||||
// NameContains applies the Contains predicate on the "name" field.
|
||||
func NameContains(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldContains(FieldName, v))
|
||||
}
|
||||
|
||||
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
|
||||
func NameHasPrefix(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldHasPrefix(FieldName, v))
|
||||
}
|
||||
|
||||
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
|
||||
func NameHasSuffix(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldHasSuffix(FieldName, v))
|
||||
}
|
||||
|
||||
// NameEqualFold applies the EqualFold predicate on the "name" field.
|
||||
func NameEqualFold(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEqualFold(FieldName, v))
|
||||
}
|
||||
|
||||
// NameContainsFold applies the ContainsFold predicate on the "name" field.
|
||||
func NameContainsFold(v string) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldContainsFold(FieldName, v))
|
||||
}
|
||||
|
||||
// DownloadsEQ applies the EQ predicate on the "downloads" field.
|
||||
func DownloadsEQ(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldDownloads, v))
|
||||
}
|
||||
|
||||
// DownloadsNEQ applies the NEQ predicate on the "downloads" field.
|
||||
func DownloadsNEQ(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNEQ(FieldDownloads, v))
|
||||
}
|
||||
|
||||
// DownloadsIn applies the In predicate on the "downloads" field.
|
||||
func DownloadsIn(vs ...int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldIn(FieldDownloads, vs...))
|
||||
}
|
||||
|
||||
// DownloadsNotIn applies the NotIn predicate on the "downloads" field.
|
||||
func DownloadsNotIn(vs ...int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNotIn(FieldDownloads, vs...))
|
||||
}
|
||||
|
||||
// DownloadsGT applies the GT predicate on the "downloads" field.
|
||||
func DownloadsGT(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGT(FieldDownloads, v))
|
||||
}
|
||||
|
||||
// DownloadsGTE applies the GTE predicate on the "downloads" field.
|
||||
func DownloadsGTE(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGTE(FieldDownloads, v))
|
||||
}
|
||||
|
||||
// DownloadsLT applies the LT predicate on the "downloads" field.
|
||||
func DownloadsLT(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLT(FieldDownloads, v))
|
||||
}
|
||||
|
||||
// DownloadsLTE applies the LTE predicate on the "downloads" field.
|
||||
func DownloadsLTE(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLTE(FieldDownloads, v))
|
||||
}
|
||||
|
||||
// FileIDEQ applies the EQ predicate on the "file_id" field.
|
||||
func FileIDEQ(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldFileID, v))
|
||||
}
|
||||
|
||||
// FileIDNEQ applies the NEQ predicate on the "file_id" field.
|
||||
func FileIDNEQ(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNEQ(FieldFileID, v))
|
||||
}
|
||||
|
||||
// FileIDIn applies the In predicate on the "file_id" field.
|
||||
func FileIDIn(vs ...int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldIn(FieldFileID, vs...))
|
||||
}
|
||||
|
||||
// FileIDNotIn applies the NotIn predicate on the "file_id" field.
|
||||
func FileIDNotIn(vs ...int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNotIn(FieldFileID, vs...))
|
||||
}
|
||||
|
||||
// SpeedEQ applies the EQ predicate on the "speed" field.
|
||||
func SpeedEQ(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldEQ(FieldSpeed, v))
|
||||
}
|
||||
|
||||
// SpeedNEQ applies the NEQ predicate on the "speed" field.
|
||||
func SpeedNEQ(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNEQ(FieldSpeed, v))
|
||||
}
|
||||
|
||||
// SpeedIn applies the In predicate on the "speed" field.
|
||||
func SpeedIn(vs ...int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldIn(FieldSpeed, vs...))
|
||||
}
|
||||
|
||||
// SpeedNotIn applies the NotIn predicate on the "speed" field.
|
||||
func SpeedNotIn(vs ...int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldNotIn(FieldSpeed, vs...))
|
||||
}
|
||||
|
||||
// SpeedGT applies the GT predicate on the "speed" field.
|
||||
func SpeedGT(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGT(FieldSpeed, v))
|
||||
}
|
||||
|
||||
// SpeedGTE applies the GTE predicate on the "speed" field.
|
||||
func SpeedGTE(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldGTE(FieldSpeed, v))
|
||||
}
|
||||
|
||||
// SpeedLT applies the LT predicate on the "speed" field.
|
||||
func SpeedLT(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLT(FieldSpeed, v))
|
||||
}
|
||||
|
||||
// SpeedLTE applies the LTE predicate on the "speed" field.
|
||||
func SpeedLTE(v int) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.FieldLTE(FieldSpeed, v))
|
||||
}
|
||||
|
||||
// HasFile applies the HasEdge predicate on the "file" edge.
|
||||
func HasFile() predicate.DirectLink {
|
||||
return predicate.DirectLink(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, FileTable, FileColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasFileWith applies the HasEdge predicate on the "file" edge with given conditions (other predicates).
|
||||
func HasFileWith(preds ...predicate.File) predicate.DirectLink {
|
||||
return predicate.DirectLink(func(s *sql.Selector) {
|
||||
step := newFileStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.DirectLink) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.DirectLink) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.DirectLink) predicate.DirectLink {
|
||||
return predicate.DirectLink(sql.NotPredicates(p))
|
||||
}
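And/Or/Not and the edge predicates compose. A sketch that keeps only popular links whose backing file matches a name filter; file.NameContainsFold is assumed to exist in the generated file package, and the client handle is hypothetical.

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/directlink"
	"github.com/cloudreve/Cloudreve/v4/ent/file"
)

func popularMatches(ctx context.Context, client *ent.Client, keyword string) ([]*ent.DirectLink, error) {
	return client.DirectLink.Query().
		Where(directlink.And(
			directlink.DownloadsGT(100),
			directlink.HasFileWith(file.NameContainsFold(keyword)),
		)).
		All(ctx)
}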
ent/directlink_create.go (new file, 883 lines)
@@ -0,0 +1,883 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"
	"errors"
	"fmt"
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"github.com/cloudreve/Cloudreve/v4/ent/directlink"
	"github.com/cloudreve/Cloudreve/v4/ent/file"
)
|
||||
|
||||
// DirectLinkCreate is the builder for creating a DirectLink entity.
|
||||
type DirectLinkCreate struct {
|
||||
config
|
||||
mutation *DirectLinkMutation
|
||||
hooks []Hook
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// SetCreatedAt sets the "created_at" field.
|
||||
func (dlc *DirectLinkCreate) SetCreatedAt(t time.Time) *DirectLinkCreate {
|
||||
dlc.mutation.SetCreatedAt(t)
|
||||
return dlc
|
||||
}
|
||||
|
||||
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
|
||||
func (dlc *DirectLinkCreate) SetNillableCreatedAt(t *time.Time) *DirectLinkCreate {
|
||||
if t != nil {
|
||||
dlc.SetCreatedAt(*t)
|
||||
}
|
||||
return dlc
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (dlc *DirectLinkCreate) SetUpdatedAt(t time.Time) *DirectLinkCreate {
|
||||
dlc.mutation.SetUpdatedAt(t)
|
||||
return dlc
|
||||
}
|
||||
|
||||
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
|
||||
func (dlc *DirectLinkCreate) SetNillableUpdatedAt(t *time.Time) *DirectLinkCreate {
|
||||
if t != nil {
|
||||
dlc.SetUpdatedAt(*t)
|
||||
}
|
||||
return dlc
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (dlc *DirectLinkCreate) SetDeletedAt(t time.Time) *DirectLinkCreate {
|
||||
dlc.mutation.SetDeletedAt(t)
|
||||
return dlc
|
||||
}
|
||||
|
||||
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
|
||||
func (dlc *DirectLinkCreate) SetNillableDeletedAt(t *time.Time) *DirectLinkCreate {
|
||||
if t != nil {
|
||||
dlc.SetDeletedAt(*t)
|
||||
}
|
||||
return dlc
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (dlc *DirectLinkCreate) SetName(s string) *DirectLinkCreate {
|
||||
dlc.mutation.SetName(s)
|
||||
return dlc
|
||||
}
|
||||
|
||||
// SetDownloads sets the "downloads" field.
|
||||
func (dlc *DirectLinkCreate) SetDownloads(i int) *DirectLinkCreate {
|
||||
dlc.mutation.SetDownloads(i)
|
||||
return dlc
|
||||
}
|
||||
|
||||
// SetFileID sets the "file_id" field.
|
||||
func (dlc *DirectLinkCreate) SetFileID(i int) *DirectLinkCreate {
|
||||
dlc.mutation.SetFileID(i)
|
||||
return dlc
|
||||
}
|
||||
|
||||
// SetSpeed sets the "speed" field.
|
||||
func (dlc *DirectLinkCreate) SetSpeed(i int) *DirectLinkCreate {
|
||||
dlc.mutation.SetSpeed(i)
|
||||
return dlc
|
||||
}
|
||||
|
||||
// SetFile sets the "file" edge to the File entity.
|
||||
func (dlc *DirectLinkCreate) SetFile(f *File) *DirectLinkCreate {
|
||||
return dlc.SetFileID(f.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the DirectLinkMutation object of the builder.
|
||||
func (dlc *DirectLinkCreate) Mutation() *DirectLinkMutation {
|
||||
return dlc.mutation
|
||||
}
|
||||
|
||||
// Save creates the DirectLink in the database.
|
||||
func (dlc *DirectLinkCreate) Save(ctx context.Context) (*DirectLink, error) {
|
||||
if err := dlc.defaults(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return withHooks(ctx, dlc.sqlSave, dlc.mutation, dlc.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (dlc *DirectLinkCreate) SaveX(ctx context.Context) *DirectLink {
|
||||
v, err := dlc.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (dlc *DirectLinkCreate) Exec(ctx context.Context) error {
|
||||
_, err := dlc.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dlc *DirectLinkCreate) ExecX(ctx context.Context) {
|
||||
if err := dlc.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (dlc *DirectLinkCreate) defaults() error {
|
||||
if _, ok := dlc.mutation.CreatedAt(); !ok {
|
||||
if directlink.DefaultCreatedAt == nil {
|
||||
return fmt.Errorf("ent: uninitialized directlink.DefaultCreatedAt (forgotten import ent/runtime?)")
|
||||
}
|
||||
v := directlink.DefaultCreatedAt()
|
||||
dlc.mutation.SetCreatedAt(v)
|
||||
}
|
||||
if _, ok := dlc.mutation.UpdatedAt(); !ok {
|
||||
if directlink.DefaultUpdatedAt == nil {
|
||||
return fmt.Errorf("ent: uninitialized directlink.DefaultUpdatedAt (forgotten import ent/runtime?)")
|
||||
}
|
||||
v := directlink.DefaultUpdatedAt()
|
||||
dlc.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (dlc *DirectLinkCreate) check() error {
|
||||
if _, ok := dlc.mutation.CreatedAt(); !ok {
|
||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "DirectLink.created_at"`)}
|
||||
}
|
||||
if _, ok := dlc.mutation.UpdatedAt(); !ok {
|
||||
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "DirectLink.updated_at"`)}
|
||||
}
|
||||
if _, ok := dlc.mutation.Name(); !ok {
|
||||
return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "DirectLink.name"`)}
|
||||
}
|
||||
if _, ok := dlc.mutation.Downloads(); !ok {
|
||||
return &ValidationError{Name: "downloads", err: errors.New(`ent: missing required field "DirectLink.downloads"`)}
|
||||
}
|
||||
if _, ok := dlc.mutation.FileID(); !ok {
|
||||
return &ValidationError{Name: "file_id", err: errors.New(`ent: missing required field "DirectLink.file_id"`)}
|
||||
}
|
||||
if _, ok := dlc.mutation.Speed(); !ok {
|
||||
return &ValidationError{Name: "speed", err: errors.New(`ent: missing required field "DirectLink.speed"`)}
|
||||
}
|
||||
if _, ok := dlc.mutation.FileID(); !ok {
|
||||
return &ValidationError{Name: "file", err: errors.New(`ent: missing required edge "DirectLink.file"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dlc *DirectLinkCreate) sqlSave(ctx context.Context) (*DirectLink, error) {
|
||||
if err := dlc.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := dlc.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, dlc.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
id := _spec.ID.Value.(int64)
|
||||
_node.ID = int(id)
|
||||
dlc.mutation.id = &_node.ID
|
||||
dlc.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (dlc *DirectLinkCreate) createSpec() (*DirectLink, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &DirectLink{config: dlc.config}
|
||||
_spec = sqlgraph.NewCreateSpec(directlink.Table, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
|
||||
)
|
||||
|
||||
if id, ok := dlc.mutation.ID(); ok {
|
||||
_node.ID = id
|
||||
id64 := int64(id)
|
||||
_spec.ID.Value = id64
|
||||
}
|
||||
|
||||
_spec.OnConflict = dlc.conflict
|
||||
if value, ok := dlc.mutation.CreatedAt(); ok {
|
||||
_spec.SetField(directlink.FieldCreatedAt, field.TypeTime, value)
|
||||
_node.CreatedAt = value
|
||||
}
|
||||
if value, ok := dlc.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(directlink.FieldUpdatedAt, field.TypeTime, value)
|
||||
_node.UpdatedAt = value
|
||||
}
|
||||
if value, ok := dlc.mutation.DeletedAt(); ok {
|
||||
_spec.SetField(directlink.FieldDeletedAt, field.TypeTime, value)
|
||||
_node.DeletedAt = &value
|
||||
}
|
||||
if value, ok := dlc.mutation.Name(); ok {
|
||||
_spec.SetField(directlink.FieldName, field.TypeString, value)
|
||||
_node.Name = value
|
||||
}
|
||||
if value, ok := dlc.mutation.Downloads(); ok {
|
||||
_spec.SetField(directlink.FieldDownloads, field.TypeInt, value)
|
||||
_node.Downloads = value
|
||||
}
|
||||
if value, ok := dlc.mutation.Speed(); ok {
|
||||
_spec.SetField(directlink.FieldSpeed, field.TypeInt, value)
|
||||
_node.Speed = value
|
||||
}
|
||||
if nodes := dlc.mutation.FileIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: directlink.FileTable,
|
||||
Columns: []string{directlink.FileColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_node.FileID = nodes[0]
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
return _node, _spec
|
||||
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.DirectLink.Create().
//		SetCreatedAt(v).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.DirectLinkUpsert) {
//			u.SetCreatedAt(v + v)
//		}).
//		Exec(ctx)
func (dlc *DirectLinkCreate) OnConflict(opts ...sql.ConflictOption) *DirectLinkUpsertOne {
	dlc.conflict = opts
	return &DirectLinkUpsertOne{
		create: dlc,
	}
}
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.DirectLink.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (dlc *DirectLinkCreate) OnConflictColumns(columns ...string) *DirectLinkUpsertOne {
|
||||
dlc.conflict = append(dlc.conflict, sql.ConflictColumns(columns...))
|
||||
return &DirectLinkUpsertOne{
|
||||
create: dlc,
|
||||
}
|
||||
}
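End to end, the two OnConflict entry points are used like this. The unique index over (file_id, name) is an assumption made for the sketch, not something declared in this diff; the client handle and arguments are hypothetical.

package example

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/directlink"
)

func upsertLink(ctx context.Context, client *ent.Client, fileID int, name string) (int, error) {
	return client.DirectLink.Create().
		SetName(name).
		SetFileID(fileID).
		SetDownloads(0).
		SetSpeed(0).
		OnConflictColumns(directlink.FieldFileID, directlink.FieldName).
		UpdateNewValues(). // keep created_at, refresh the rest from the proposed row
		ID(ctx)            // returns the inserted or updated row id
}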
|
||||
|
||||
type (
|
||||
// DirectLinkUpsertOne is the builder for "upsert"-ing
|
||||
// one DirectLink node.
|
||||
DirectLinkUpsertOne struct {
|
||||
create *DirectLinkCreate
|
||||
}
|
||||
|
||||
// DirectLinkUpsert is the "OnConflict" setter.
|
||||
DirectLinkUpsert struct {
|
||||
*sql.UpdateSet
|
||||
}
|
||||
)
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *DirectLinkUpsert) SetUpdatedAt(v time.Time) *DirectLinkUpsert {
|
||||
u.Set(directlink.FieldUpdatedAt, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsert) UpdateUpdatedAt() *DirectLinkUpsert {
|
||||
u.SetExcluded(directlink.FieldUpdatedAt)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (u *DirectLinkUpsert) SetDeletedAt(v time.Time) *DirectLinkUpsert {
|
||||
u.Set(directlink.FieldDeletedAt, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsert) UpdateDeletedAt() *DirectLinkUpsert {
|
||||
u.SetExcluded(directlink.FieldDeletedAt)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||
func (u *DirectLinkUpsert) ClearDeletedAt() *DirectLinkUpsert {
|
||||
u.SetNull(directlink.FieldDeletedAt)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (u *DirectLinkUpsert) SetName(v string) *DirectLinkUpsert {
|
||||
u.Set(directlink.FieldName, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateName sets the "name" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsert) UpdateName() *DirectLinkUpsert {
|
||||
u.SetExcluded(directlink.FieldName)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetDownloads sets the "downloads" field.
|
||||
func (u *DirectLinkUpsert) SetDownloads(v int) *DirectLinkUpsert {
|
||||
u.Set(directlink.FieldDownloads, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateDownloads sets the "downloads" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsert) UpdateDownloads() *DirectLinkUpsert {
|
||||
u.SetExcluded(directlink.FieldDownloads)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddDownloads adds v to the "downloads" field.
|
||||
func (u *DirectLinkUpsert) AddDownloads(v int) *DirectLinkUpsert {
|
||||
u.Add(directlink.FieldDownloads, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetFileID sets the "file_id" field.
|
||||
func (u *DirectLinkUpsert) SetFileID(v int) *DirectLinkUpsert {
|
||||
u.Set(directlink.FieldFileID, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateFileID sets the "file_id" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsert) UpdateFileID() *DirectLinkUpsert {
|
||||
u.SetExcluded(directlink.FieldFileID)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetSpeed sets the "speed" field.
|
||||
func (u *DirectLinkUpsert) SetSpeed(v int) *DirectLinkUpsert {
|
||||
u.Set(directlink.FieldSpeed, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateSpeed sets the "speed" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsert) UpdateSpeed() *DirectLinkUpsert {
|
||||
u.SetExcluded(directlink.FieldSpeed)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddSpeed adds v to the "speed" field.
|
||||
func (u *DirectLinkUpsert) AddSpeed(v int) *DirectLinkUpsert {
|
||||
u.Add(directlink.FieldSpeed, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.DirectLink.Create().
|
||||
// OnConflict(
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// Exec(ctx)
|
||||
func (u *DirectLinkUpsertOne) UpdateNewValues() *DirectLinkUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||
if _, exists := u.create.mutation.CreatedAt(); exists {
|
||||
s.SetIgnore(directlink.FieldCreatedAt)
|
||||
}
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.DirectLink.Create().
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *DirectLinkUpsertOne) Ignore() *DirectLinkUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *DirectLinkUpsertOne) DoNothing() *DirectLinkUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding fields `UPDATE` values. See the DirectLinkCreate.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *DirectLinkUpsertOne) Update(set func(*DirectLinkUpsert)) *DirectLinkUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&DirectLinkUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
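Update(set func(*DirectLinkUpsert)) is the hook for custom conflict actions, for example treating a duplicate insert as a download-counter bump. A sketch, again assuming a unique key on the conflict columns; the client handle and arguments are hypothetical.

package example

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/directlink"
)

func recordDownload(ctx context.Context, client *ent.Client, fileID int, name string) error {
	return client.DirectLink.Create().
		SetName(name).
		SetFileID(fileID).
		SetDownloads(1).
		SetSpeed(0).
		OnConflict(sql.ConflictColumns(directlink.FieldFileID, directlink.FieldName)).
		Update(func(u *ent.DirectLinkUpsert) {
			u.AddDownloads(1)   // bump the counter on conflict
			u.UpdateUpdatedAt() // take updated_at from the proposed row
		}).
		Exec(ctx)
}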
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *DirectLinkUpsertOne) SetUpdatedAt(v time.Time) *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetUpdatedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertOne) UpdateUpdatedAt() *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateUpdatedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (u *DirectLinkUpsertOne) SetDeletedAt(v time.Time) *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetDeletedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertOne) UpdateDeletedAt() *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateDeletedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||
func (u *DirectLinkUpsertOne) ClearDeletedAt() *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.ClearDeletedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (u *DirectLinkUpsertOne) SetName(v string) *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetName(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateName sets the "name" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertOne) UpdateName() *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateName()
|
||||
})
|
||||
}
|
||||
|
||||
// SetDownloads sets the "downloads" field.
|
||||
func (u *DirectLinkUpsertOne) SetDownloads(v int) *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetDownloads(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddDownloads adds v to the "downloads" field.
|
||||
func (u *DirectLinkUpsertOne) AddDownloads(v int) *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.AddDownloads(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateDownloads sets the "downloads" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertOne) UpdateDownloads() *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateDownloads()
|
||||
})
|
||||
}
|
||||
|
||||
// SetFileID sets the "file_id" field.
|
||||
func (u *DirectLinkUpsertOne) SetFileID(v int) *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetFileID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateFileID sets the "file_id" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertOne) UpdateFileID() *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateFileID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSpeed sets the "speed" field.
|
||||
func (u *DirectLinkUpsertOne) SetSpeed(v int) *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetSpeed(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSpeed adds v to the "speed" field.
|
||||
func (u *DirectLinkUpsertOne) AddSpeed(v int) *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.AddSpeed(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSpeed sets the "speed" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertOne) UpdateSpeed() *DirectLinkUpsertOne {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateSpeed()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *DirectLinkUpsertOne) Exec(ctx context.Context) error {
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for DirectLinkCreate.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *DirectLinkUpsertOne) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// ID executes the UPSERT query and returns the inserted/updated ID.
|
||||
func (u *DirectLinkUpsertOne) ID(ctx context.Context) (id int, err error) {
|
||||
node, err := u.create.Save(ctx)
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
return node.ID, nil
|
||||
}
|
||||
|
||||
// IDX is like ID, but panics if an error occurs.
|
||||
func (u *DirectLinkUpsertOne) IDX(ctx context.Context) int {
|
||||
id, err := u.ID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
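// SetRawID sets a raw "id" value on the underlying mutation (an extension to the
// ent-generated builder; the exact semantics are defined by DirectLinkMutation.SetRawID).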
func (m *DirectLinkCreate) SetRawID(t int) *DirectLinkCreate {
|
||||
m.mutation.SetRawID(t)
|
||||
return m
|
||||
}
|
||||
|
||||
// DirectLinkCreateBulk is the builder for creating many DirectLink entities in bulk.
|
||||
type DirectLinkCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*DirectLinkCreate
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// Save creates the DirectLink entities in the database.
|
||||
func (dlcb *DirectLinkCreateBulk) Save(ctx context.Context) ([]*DirectLink, error) {
|
||||
if dlcb.err != nil {
|
||||
return nil, dlcb.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(dlcb.builders))
|
||||
nodes := make([]*DirectLink, len(dlcb.builders))
|
||||
mutators := make([]Mutator, len(dlcb.builders))
|
||||
for i := range dlcb.builders {
|
||||
func(i int, root context.Context) {
|
||||
builder := dlcb.builders[i]
|
||||
builder.defaults()
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutation, ok := m.(*DirectLinkMutation)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
if err := builder.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, dlcb.builders[i+1].mutation)
|
||||
} else {
|
||||
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||
spec.OnConflict = dlcb.conflict
|
||||
// Invoke the actual operation on the latest mutation in the chain.
|
||||
if err = sqlgraph.BatchCreate(ctx, dlcb.driver, spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &nodes[i].ID
|
||||
if specs[i].ID.Value != nil {
|
||||
id := specs[i].ID.Value.(int64)
|
||||
nodes[i].ID = int(id)
|
||||
}
|
||||
mutation.done = true
|
||||
return nodes[i], nil
|
||||
})
|
||||
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||
mut = builder.hooks[i](mut)
|
||||
}
|
||||
mutators[i] = mut
|
||||
}(i, ctx)
|
||||
}
|
||||
if len(mutators) > 0 {
|
||||
if _, err := mutators[0].Mutate(ctx, dlcb.builders[0].mutation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (dlcb *DirectLinkCreateBulk) SaveX(ctx context.Context) []*DirectLink {
|
||||
v, err := dlcb.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (dlcb *DirectLinkCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := dlcb.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dlcb *DirectLinkCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := dlcb.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||
// of the `INSERT` statement. For example:
|
||||
//
|
||||
// client.DirectLink.CreateBulk(builders...).
|
||||
// OnConflict(
|
||||
// // Update the row with the new values
|
||||
// // that was proposed for insertion.
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// // Override some of the fields with custom
|
||||
// // update values.
|
||||
// Update(func(u *ent.DirectLinkUpsert) {
|
||||
// SetCreatedAt(v+v).
|
||||
// }).
|
||||
// Exec(ctx)
|
||||
func (dlcb *DirectLinkCreateBulk) OnConflict(opts ...sql.ConflictOption) *DirectLinkUpsertBulk {
|
||||
dlcb.conflict = opts
|
||||
return &DirectLinkUpsertBulk{
|
||||
create: dlcb,
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.DirectLink.CreateBulk(builders...).
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (dlcb *DirectLinkCreateBulk) OnConflictColumns(columns ...string) *DirectLinkUpsertBulk {
|
||||
dlcb.conflict = append(dlcb.conflict, sql.ConflictColumns(columns...))
|
||||
return &DirectLinkUpsertBulk{
|
||||
create: dlcb,
|
||||
}
|
||||
}
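// A minimal bulk-upsert sketch (assuming an *ent.Client named client, a
// context.Context named ctx, and a slice of pre-built *ent.DirectLinkCreate
// builders named builders):
//
//	err := client.DirectLink.CreateBulk(builders...).
//		OnConflictColumns(directlink.FieldFileID).
//		UpdateNewValues().
//		Exec(ctx)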
|
||||
|
||||
// DirectLinkUpsertBulk is the builder for "upsert"-ing
|
||||
// a bulk of DirectLink nodes.
|
||||
type DirectLinkUpsertBulk struct {
|
||||
create *DirectLinkCreateBulk
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that
|
||||
// were set on create. Using this option is equivalent to using:
|
||||
//
|
||||
// client.DirectLink.CreateBulk(builders...).
|
||||
// OnConflict(
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// Exec(ctx)
|
||||
func (u *DirectLinkUpsertBulk) UpdateNewValues() *DirectLinkUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||
for _, b := range u.create.builders {
|
||||
if _, exists := b.mutation.CreatedAt(); exists {
|
||||
s.SetIgnore(directlink.FieldCreatedAt)
|
||||
}
|
||||
}
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.DirectLink.CreateBulk(builders...).
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *DirectLinkUpsertBulk) Ignore() *DirectLinkUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *DirectLinkUpsertBulk) DoNothing() *DirectLinkUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding the `UPDATE` values of fields. See the DirectLinkCreateBulk.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *DirectLinkUpsertBulk) Update(set func(*DirectLinkUpsert)) *DirectLinkUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&DirectLinkUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *DirectLinkUpsertBulk) SetUpdatedAt(v time.Time) *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetUpdatedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertBulk) UpdateUpdatedAt() *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateUpdatedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (u *DirectLinkUpsertBulk) SetDeletedAt(v time.Time) *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetDeletedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertBulk) UpdateDeletedAt() *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateDeletedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||
func (u *DirectLinkUpsertBulk) ClearDeletedAt() *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.ClearDeletedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (u *DirectLinkUpsertBulk) SetName(v string) *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetName(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateName sets the "name" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertBulk) UpdateName() *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateName()
|
||||
})
|
||||
}
|
||||
|
||||
// SetDownloads sets the "downloads" field.
|
||||
func (u *DirectLinkUpsertBulk) SetDownloads(v int) *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetDownloads(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddDownloads adds v to the "downloads" field.
|
||||
func (u *DirectLinkUpsertBulk) AddDownloads(v int) *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.AddDownloads(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateDownloads sets the "downloads" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertBulk) UpdateDownloads() *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateDownloads()
|
||||
})
|
||||
}
|
||||
|
||||
// SetFileID sets the "file_id" field.
|
||||
func (u *DirectLinkUpsertBulk) SetFileID(v int) *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetFileID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateFileID sets the "file_id" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertBulk) UpdateFileID() *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateFileID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSpeed sets the "speed" field.
|
||||
func (u *DirectLinkUpsertBulk) SetSpeed(v int) *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.SetSpeed(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSpeed adds v to the "speed" field.
|
||||
func (u *DirectLinkUpsertBulk) AddSpeed(v int) *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.AddSpeed(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSpeed sets the "speed" field to the value that was provided on create.
|
||||
func (u *DirectLinkUpsertBulk) UpdateSpeed() *DirectLinkUpsertBulk {
|
||||
return u.Update(func(s *DirectLinkUpsert) {
|
||||
s.UpdateSpeed()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *DirectLinkUpsertBulk) Exec(ctx context.Context) error {
|
||||
if u.create.err != nil {
|
||||
return u.create.err
|
||||
}
|
||||
for i, b := range u.create.builders {
|
||||
if len(b.conflict) != 0 {
|
||||
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the DirectLinkCreateBulk instead", i)
|
||||
}
|
||||
}
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for DirectLinkCreateBulk.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *DirectLinkUpsertBulk) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}

ent/directlink_delete.go (new file, 88 lines)
@@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
|
||||
)
|
||||
|
||||
// DirectLinkDelete is the builder for deleting a DirectLink entity.
|
||||
type DirectLinkDelete struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *DirectLinkMutation
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the DirectLinkDelete builder.
|
||||
func (dld *DirectLinkDelete) Where(ps ...predicate.DirectLink) *DirectLinkDelete {
|
||||
dld.mutation.Where(ps...)
|
||||
return dld
|
||||
}
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (dld *DirectLinkDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, dld.sqlExec, dld.mutation, dld.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dld *DirectLinkDelete) ExecX(ctx context.Context) int {
|
||||
n, err := dld.Exec(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (dld *DirectLinkDelete) sqlExec(ctx context.Context) (int, error) {
|
||||
_spec := sqlgraph.NewDeleteSpec(directlink.Table, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
|
||||
if ps := dld.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
affected, err := sqlgraph.DeleteNodes(ctx, dld.driver, _spec)
|
||||
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
dld.mutation.done = true
|
||||
return affected, err
|
||||
}
|
||||
|
||||
// DirectLinkDeleteOne is the builder for deleting a single DirectLink entity.
|
||||
type DirectLinkDeleteOne struct {
|
||||
dld *DirectLinkDelete
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the DirectLinkDelete builder.
|
||||
func (dldo *DirectLinkDeleteOne) Where(ps ...predicate.DirectLink) *DirectLinkDeleteOne {
|
||||
dldo.dld.mutation.Where(ps...)
|
||||
return dldo
|
||||
}
|
||||
|
||||
// Exec executes the deletion query.
|
||||
func (dldo *DirectLinkDeleteOne) Exec(ctx context.Context) error {
|
||||
n, err := dldo.dld.Exec(ctx)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case n == 0:
|
||||
return &NotFoundError{directlink.Label}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dldo *DirectLinkDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := dldo.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
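// A minimal deletion sketch (assuming an *ent.Client named client and a
// context.Context named ctx; the predicate is illustrative):
//
//	affected, err := client.DirectLink.Delete().
//		Where(directlink.FileID(fileID)).
//		Exec(ctx)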

ent/directlink_query.go (new file, 605 lines)
@@ -0,0 +1,605 @@
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/file"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
|
||||
)
|
||||
|
||||
// DirectLinkQuery is the builder for querying DirectLink entities.
|
||||
type DirectLinkQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []directlink.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.DirectLink
|
||||
withFile *FileQuery
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the DirectLinkQuery builder.
|
||||
func (dlq *DirectLinkQuery) Where(ps ...predicate.DirectLink) *DirectLinkQuery {
|
||||
dlq.predicates = append(dlq.predicates, ps...)
|
||||
return dlq
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (dlq *DirectLinkQuery) Limit(limit int) *DirectLinkQuery {
|
||||
dlq.ctx.Limit = &limit
|
||||
return dlq
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (dlq *DirectLinkQuery) Offset(offset int) *DirectLinkQuery {
|
||||
dlq.ctx.Offset = &offset
|
||||
return dlq
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (dlq *DirectLinkQuery) Unique(unique bool) *DirectLinkQuery {
|
||||
dlq.ctx.Unique = &unique
|
||||
return dlq
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (dlq *DirectLinkQuery) Order(o ...directlink.OrderOption) *DirectLinkQuery {
|
||||
dlq.order = append(dlq.order, o...)
|
||||
return dlq
|
||||
}
|
||||
|
||||
// QueryFile chains the current query on the "file" edge.
|
||||
func (dlq *DirectLinkQuery) QueryFile() *FileQuery {
|
||||
query := (&FileClient{config: dlq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := dlq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := dlq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(directlink.Table, directlink.FieldID, selector),
|
||||
sqlgraph.To(file.Table, file.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, directlink.FileTable, directlink.FileColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(dlq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
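// A traversal sketch for the edge query above (assuming client and ctx;
// directlink.ID is the generated ID predicate):
//
//	f, err := client.DirectLink.Query().
//		Where(directlink.ID(id)).
//		QueryFile().
//		Only(ctx)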
|
||||
|
||||
// First returns the first DirectLink entity from the query.
|
||||
// Returns a *NotFoundError when no DirectLink was found.
|
||||
func (dlq *DirectLinkQuery) First(ctx context.Context) (*DirectLink, error) {
|
||||
nodes, err := dlq.Limit(1).All(setContextOp(ctx, dlq.ctx, "First"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{directlink.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (dlq *DirectLinkQuery) FirstX(ctx context.Context) *DirectLink {
|
||||
node, err := dlq.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first DirectLink ID from the query.
|
||||
// Returns a *NotFoundError when no DirectLink ID was found.
|
||||
func (dlq *DirectLinkQuery) FirstID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = dlq.Limit(1).IDs(setContextOp(ctx, dlq.ctx, "FirstID")); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{directlink.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (dlq *DirectLinkQuery) FirstIDX(ctx context.Context) int {
|
||||
id, err := dlq.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single DirectLink entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one DirectLink entity is found.
|
||||
// Returns a *NotFoundError when no DirectLink entities are found.
|
||||
func (dlq *DirectLinkQuery) Only(ctx context.Context) (*DirectLink, error) {
|
||||
nodes, err := dlq.Limit(2).All(setContextOp(ctx, dlq.ctx, "Only"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{directlink.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{directlink.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (dlq *DirectLinkQuery) OnlyX(ctx context.Context) *DirectLink {
|
||||
node, err := dlq.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only DirectLink ID in the query.
|
||||
// Returns a *NotSingularError when more than one DirectLink ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (dlq *DirectLinkQuery) OnlyID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = dlq.Limit(2).IDs(setContextOp(ctx, dlq.ctx, "OnlyID")); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{directlink.Label}
|
||||
default:
|
||||
err = &NotSingularError{directlink.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (dlq *DirectLinkQuery) OnlyIDX(ctx context.Context) int {
|
||||
id, err := dlq.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of DirectLinks.
|
||||
func (dlq *DirectLinkQuery) All(ctx context.Context) ([]*DirectLink, error) {
|
||||
ctx = setContextOp(ctx, dlq.ctx, "All")
|
||||
if err := dlq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*DirectLink, *DirectLinkQuery]()
|
||||
return withInterceptors[[]*DirectLink](ctx, dlq, qr, dlq.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (dlq *DirectLinkQuery) AllX(ctx context.Context) []*DirectLink {
|
||||
nodes, err := dlq.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of DirectLink IDs.
|
||||
func (dlq *DirectLinkQuery) IDs(ctx context.Context) (ids []int, err error) {
|
||||
if dlq.ctx.Unique == nil && dlq.path != nil {
|
||||
dlq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, dlq.ctx, "IDs")
|
||||
if err = dlq.Select(directlink.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (dlq *DirectLinkQuery) IDsX(ctx context.Context) []int {
|
||||
ids, err := dlq.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (dlq *DirectLinkQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, dlq.ctx, "Count")
|
||||
if err := dlq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, dlq, querierCount[*DirectLinkQuery](), dlq.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (dlq *DirectLinkQuery) CountX(ctx context.Context) int {
|
||||
count, err := dlq.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (dlq *DirectLinkQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, dlq.ctx, "Exist")
|
||||
switch _, err := dlq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
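// An existence-check sketch (assuming client and ctx; the predicate is
// illustrative):
//
//	ok, err := client.DirectLink.Query().
//		Where(directlink.NameContains("report")).
//		Exist(ctx)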
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (dlq *DirectLinkQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := dlq.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the DirectLinkQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (dlq *DirectLinkQuery) Clone() *DirectLinkQuery {
|
||||
if dlq == nil {
|
||||
return nil
|
||||
}
|
||||
return &DirectLinkQuery{
|
||||
config: dlq.config,
|
||||
ctx: dlq.ctx.Clone(),
|
||||
order: append([]directlink.OrderOption{}, dlq.order...),
|
||||
inters: append([]Interceptor{}, dlq.inters...),
|
||||
predicates: append([]predicate.DirectLink{}, dlq.predicates...),
|
||||
withFile: dlq.withFile.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: dlq.sql.Clone(),
|
||||
path: dlq.path,
|
||||
}
|
||||
}
|
||||
|
||||
// WithFile tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "file" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (dlq *DirectLinkQuery) WithFile(opts ...func(*FileQuery)) *DirectLinkQuery {
|
||||
query := (&FileClient{config: dlq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
dlq.withFile = query
|
||||
return dlq
|
||||
}
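// An eager-loading sketch for the "file" edge (assuming client and ctx); the
// loaded neighbor is available on Edges.File when the query succeeds:
//
//	links, err := client.DirectLink.Query().
//		WithFile().
//		All(ctx)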
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.DirectLink.Query().
|
||||
// GroupBy(directlink.FieldCreatedAt).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (dlq *DirectLinkQuery) GroupBy(field string, fields ...string) *DirectLinkGroupBy {
|
||||
dlq.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &DirectLinkGroupBy{build: dlq}
|
||||
grbuild.flds = &dlq.ctx.Fields
|
||||
grbuild.label = directlink.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection of one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.DirectLink.Query().
|
||||
// Select(directlink.FieldCreatedAt).
|
||||
// Scan(ctx, &v)
|
||||
func (dlq *DirectLinkQuery) Select(fields ...string) *DirectLinkSelect {
|
||||
dlq.ctx.Fields = append(dlq.ctx.Fields, fields...)
|
||||
sbuild := &DirectLinkSelect{DirectLinkQuery: dlq}
|
||||
sbuild.label = directlink.Label
|
||||
sbuild.flds, sbuild.scan = &dlq.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a DirectLinkSelect configured with the given aggregations.
|
||||
func (dlq *DirectLinkQuery) Aggregate(fns ...AggregateFunc) *DirectLinkSelect {
|
||||
return dlq.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
func (dlq *DirectLinkQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range dlq.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
}
|
||||
if trv, ok := inter.(Traverser); ok {
|
||||
if err := trv.Traverse(ctx, dlq); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, f := range dlq.ctx.Fields {
|
||||
if !directlink.ValidColumn(f) {
|
||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
}
|
||||
if dlq.path != nil {
|
||||
prev, err := dlq.path(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dlq.sql = prev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dlq *DirectLinkQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*DirectLink, error) {
|
||||
var (
|
||||
nodes = []*DirectLink{}
|
||||
_spec = dlq.querySpec()
|
||||
loadedTypes = [1]bool{
|
||||
dlq.withFile != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*DirectLink).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &DirectLink{config: dlq.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, dlq.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := dlq.withFile; query != nil {
|
||||
if err := dlq.loadFile(ctx, query, nodes, nil,
|
||||
func(n *DirectLink, e *File) { n.Edges.File = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (dlq *DirectLinkQuery) loadFile(ctx context.Context, query *FileQuery, nodes []*DirectLink, init func(*DirectLink), assign func(*DirectLink, *File)) error {
|
||||
ids := make([]int, 0, len(nodes))
|
||||
nodeids := make(map[int][]*DirectLink)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].FileID
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(file.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "file_id" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dlq *DirectLinkQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := dlq.querySpec()
|
||||
_spec.Node.Columns = dlq.ctx.Fields
|
||||
if len(dlq.ctx.Fields) > 0 {
|
||||
_spec.Unique = dlq.ctx.Unique != nil && *dlq.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, dlq.driver, _spec)
|
||||
}
|
||||
|
||||
func (dlq *DirectLinkQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(directlink.Table, directlink.Columns, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
|
||||
_spec.From = dlq.sql
|
||||
if unique := dlq.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if dlq.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := dlq.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, directlink.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != directlink.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
if dlq.withFile != nil {
|
||||
_spec.Node.AddColumnOnce(directlink.FieldFileID)
|
||||
}
|
||||
}
|
||||
if ps := dlq.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := dlq.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := dlq.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := dlq.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (dlq *DirectLinkQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(dlq.driver.Dialect())
|
||||
t1 := builder.Table(directlink.Table)
|
||||
columns := dlq.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = directlink.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if dlq.sql != nil {
|
||||
selector = dlq.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if dlq.ctx.Unique != nil && *dlq.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, p := range dlq.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range dlq.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := dlq.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for the offset clause. We start
// with a default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := dlq.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// DirectLinkGroupBy is the group-by builder for DirectLink entities.
|
||||
type DirectLinkGroupBy struct {
|
||||
selector
|
||||
build *DirectLinkQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (dlgb *DirectLinkGroupBy) Aggregate(fns ...AggregateFunc) *DirectLinkGroupBy {
|
||||
dlgb.fns = append(dlgb.fns, fns...)
|
||||
return dlgb
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (dlgb *DirectLinkGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, dlgb.build.ctx, "GroupBy")
|
||||
if err := dlgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*DirectLinkQuery, *DirectLinkGroupBy](ctx, dlgb.build, dlgb, dlgb.build.inters, v)
|
||||
}
|
||||
|
||||
func (dlgb *DirectLinkGroupBy) sqlScan(ctx context.Context, root *DirectLinkQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(dlgb.fns))
|
||||
for _, fn := range dlgb.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(*dlgb.flds)+len(dlgb.fns))
|
||||
for _, f := range *dlgb.flds {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
columns = append(columns, aggregation...)
|
||||
selector.Select(columns...)
|
||||
}
|
||||
selector.GroupBy(selector.Columns(*dlgb.flds...)...)
|
||||
if err := selector.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := dlgb.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// DirectLinkSelect is the builder for selecting fields of DirectLink entities.
|
||||
type DirectLinkSelect struct {
|
||||
*DirectLinkQuery
|
||||
selector
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (dls *DirectLinkSelect) Aggregate(fns ...AggregateFunc) *DirectLinkSelect {
|
||||
dls.fns = append(dls.fns, fns...)
|
||||
return dls
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (dls *DirectLinkSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, dls.ctx, "Select")
|
||||
if err := dls.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*DirectLinkQuery, *DirectLinkSelect](ctx, dls.DirectLinkQuery, dls, dls.inters, v)
|
||||
}
|
||||
|
||||
func (dls *DirectLinkSelect) sqlScan(ctx context.Context, root *DirectLinkQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(dls.fns))
|
||||
for _, fn := range dls.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
switch n := len(*dls.selector.flds); {
|
||||
case n == 0 && len(aggregation) > 0:
|
||||
selector.Select(aggregation...)
|
||||
case n != 0 && len(aggregation) > 0:
|
||||
selector.AppendSelect(aggregation...)
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := dls.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}

ent/directlink_update.go (new file, 549 lines)
@@ -0,0 +1,549 @@
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/file"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
|
||||
)
|
||||
|
||||
// DirectLinkUpdate is the builder for updating DirectLink entities.
|
||||
type DirectLinkUpdate struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *DirectLinkMutation
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the DirectLinkUpdate builder.
|
||||
func (dlu *DirectLinkUpdate) Where(ps ...predicate.DirectLink) *DirectLinkUpdate {
|
||||
dlu.mutation.Where(ps...)
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (dlu *DirectLinkUpdate) SetUpdatedAt(t time.Time) *DirectLinkUpdate {
|
||||
dlu.mutation.SetUpdatedAt(t)
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (dlu *DirectLinkUpdate) SetDeletedAt(t time.Time) *DirectLinkUpdate {
|
||||
dlu.mutation.SetDeletedAt(t)
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
|
||||
func (dlu *DirectLinkUpdate) SetNillableDeletedAt(t *time.Time) *DirectLinkUpdate {
|
||||
if t != nil {
|
||||
dlu.SetDeletedAt(*t)
|
||||
}
|
||||
return dlu
|
||||
}
|
||||
|
||||
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||
func (dlu *DirectLinkUpdate) ClearDeletedAt() *DirectLinkUpdate {
|
||||
dlu.mutation.ClearDeletedAt()
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (dlu *DirectLinkUpdate) SetName(s string) *DirectLinkUpdate {
|
||||
dlu.mutation.SetName(s)
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetNillableName sets the "name" field if the given value is not nil.
|
||||
func (dlu *DirectLinkUpdate) SetNillableName(s *string) *DirectLinkUpdate {
|
||||
if s != nil {
|
||||
dlu.SetName(*s)
|
||||
}
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetDownloads sets the "downloads" field.
|
||||
func (dlu *DirectLinkUpdate) SetDownloads(i int) *DirectLinkUpdate {
|
||||
dlu.mutation.ResetDownloads()
|
||||
dlu.mutation.SetDownloads(i)
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetNillableDownloads sets the "downloads" field if the given value is not nil.
|
||||
func (dlu *DirectLinkUpdate) SetNillableDownloads(i *int) *DirectLinkUpdate {
|
||||
if i != nil {
|
||||
dlu.SetDownloads(*i)
|
||||
}
|
||||
return dlu
|
||||
}
|
||||
|
||||
// AddDownloads adds i to the "downloads" field.
|
||||
func (dlu *DirectLinkUpdate) AddDownloads(i int) *DirectLinkUpdate {
|
||||
dlu.mutation.AddDownloads(i)
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetFileID sets the "file_id" field.
|
||||
func (dlu *DirectLinkUpdate) SetFileID(i int) *DirectLinkUpdate {
|
||||
dlu.mutation.SetFileID(i)
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetNillableFileID sets the "file_id" field if the given value is not nil.
|
||||
func (dlu *DirectLinkUpdate) SetNillableFileID(i *int) *DirectLinkUpdate {
|
||||
if i != nil {
|
||||
dlu.SetFileID(*i)
|
||||
}
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetSpeed sets the "speed" field.
|
||||
func (dlu *DirectLinkUpdate) SetSpeed(i int) *DirectLinkUpdate {
|
||||
dlu.mutation.ResetSpeed()
|
||||
dlu.mutation.SetSpeed(i)
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetNillableSpeed sets the "speed" field if the given value is not nil.
|
||||
func (dlu *DirectLinkUpdate) SetNillableSpeed(i *int) *DirectLinkUpdate {
|
||||
if i != nil {
|
||||
dlu.SetSpeed(*i)
|
||||
}
|
||||
return dlu
|
||||
}
|
||||
|
||||
// AddSpeed adds i to the "speed" field.
|
||||
func (dlu *DirectLinkUpdate) AddSpeed(i int) *DirectLinkUpdate {
|
||||
dlu.mutation.AddSpeed(i)
|
||||
return dlu
|
||||
}
|
||||
|
||||
// SetFile sets the "file" edge to the File entity.
|
||||
func (dlu *DirectLinkUpdate) SetFile(f *File) *DirectLinkUpdate {
|
||||
return dlu.SetFileID(f.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the DirectLinkMutation object of the builder.
|
||||
func (dlu *DirectLinkUpdate) Mutation() *DirectLinkMutation {
|
||||
return dlu.mutation
|
||||
}
|
||||
|
||||
// ClearFile clears the "file" edge to the File entity.
|
||||
func (dlu *DirectLinkUpdate) ClearFile() *DirectLinkUpdate {
|
||||
dlu.mutation.ClearFile()
|
||||
return dlu
|
||||
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (dlu *DirectLinkUpdate) Save(ctx context.Context) (int, error) {
|
||||
if err := dlu.defaults(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withHooks(ctx, dlu.sqlSave, dlu.mutation, dlu.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (dlu *DirectLinkUpdate) SaveX(ctx context.Context) int {
|
||||
affected, err := dlu.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return affected
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (dlu *DirectLinkUpdate) Exec(ctx context.Context) error {
|
||||
_, err := dlu.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dlu *DirectLinkUpdate) ExecX(ctx context.Context) {
|
||||
if err := dlu.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
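// A predicate-based update sketch (assuming client and ctx; the predicate and
// increment are illustrative):
//
//	n, err := client.DirectLink.Update().
//		Where(directlink.FileID(fileID)).
//		AddDownloads(1).
//		Save(ctx)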
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (dlu *DirectLinkUpdate) defaults() error {
|
||||
if _, ok := dlu.mutation.UpdatedAt(); !ok {
|
||||
if directlink.UpdateDefaultUpdatedAt == nil {
|
||||
return fmt.Errorf("ent: uninitialized directlink.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
|
||||
}
|
||||
v := directlink.UpdateDefaultUpdatedAt()
|
||||
dlu.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (dlu *DirectLinkUpdate) check() error {
|
||||
if _, ok := dlu.mutation.FileID(); dlu.mutation.FileCleared() && !ok {
|
||||
return errors.New(`ent: clearing a required unique edge "DirectLink.file"`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dlu *DirectLinkUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
if err := dlu.check(); err != nil {
|
||||
return n, err
|
||||
}
|
||||
_spec := sqlgraph.NewUpdateSpec(directlink.Table, directlink.Columns, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
|
||||
if ps := dlu.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := dlu.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(directlink.FieldUpdatedAt, field.TypeTime, value)
|
||||
}
|
||||
if value, ok := dlu.mutation.DeletedAt(); ok {
|
||||
_spec.SetField(directlink.FieldDeletedAt, field.TypeTime, value)
|
||||
}
|
||||
if dlu.mutation.DeletedAtCleared() {
|
||||
_spec.ClearField(directlink.FieldDeletedAt, field.TypeTime)
|
||||
}
|
||||
if value, ok := dlu.mutation.Name(); ok {
|
||||
_spec.SetField(directlink.FieldName, field.TypeString, value)
|
||||
}
|
||||
if value, ok := dlu.mutation.Downloads(); ok {
|
||||
_spec.SetField(directlink.FieldDownloads, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := dlu.mutation.AddedDownloads(); ok {
|
||||
_spec.AddField(directlink.FieldDownloads, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := dlu.mutation.Speed(); ok {
|
||||
_spec.SetField(directlink.FieldSpeed, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := dlu.mutation.AddedSpeed(); ok {
|
||||
_spec.AddField(directlink.FieldSpeed, field.TypeInt, value)
|
||||
}
|
||||
if dlu.mutation.FileCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: directlink.FileTable,
|
||||
Columns: []string{directlink.FileColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := dlu.mutation.FileIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: directlink.FileTable,
|
||||
Columns: []string{directlink.FileColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if n, err = sqlgraph.UpdateNodes(ctx, dlu.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{directlink.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
dlu.mutation.done = true
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// DirectLinkUpdateOne is the builder for updating a single DirectLink entity.
|
||||
type DirectLinkUpdateOne struct {
|
||||
config
|
||||
fields []string
|
||||
hooks []Hook
|
||||
mutation *DirectLinkMutation
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (dluo *DirectLinkUpdateOne) SetUpdatedAt(t time.Time) *DirectLinkUpdateOne {
|
||||
dluo.mutation.SetUpdatedAt(t)
|
||||
return dluo
|
||||
}
|
||||
|
||||
// SetDeletedAt sets the "deleted_at" field.
|
||||
func (dluo *DirectLinkUpdateOne) SetDeletedAt(t time.Time) *DirectLinkUpdateOne {
|
||||
dluo.mutation.SetDeletedAt(t)
|
||||
return dluo
|
||||
}
|
||||
|
||||
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
|
||||
func (dluo *DirectLinkUpdateOne) SetNillableDeletedAt(t *time.Time) *DirectLinkUpdateOne {
|
||||
if t != nil {
|
||||
dluo.SetDeletedAt(*t)
|
||||
}
|
||||
return dluo
|
||||
}
|
||||
|
||||
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||
func (dluo *DirectLinkUpdateOne) ClearDeletedAt() *DirectLinkUpdateOne {
|
||||
dluo.mutation.ClearDeletedAt()
|
||||
return dluo
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (dluo *DirectLinkUpdateOne) SetName(s string) *DirectLinkUpdateOne {
|
||||
dluo.mutation.SetName(s)
|
||||
return dluo
|
||||
}
|
||||
|
||||
// SetNillableName sets the "name" field if the given value is not nil.
|
||||
func (dluo *DirectLinkUpdateOne) SetNillableName(s *string) *DirectLinkUpdateOne {
|
||||
if s != nil {
|
||||
dluo.SetName(*s)
|
||||
}
|
||||
return dluo
|
||||
}
|
||||
|
||||
// SetDownloads sets the "downloads" field.
|
||||
func (dluo *DirectLinkUpdateOne) SetDownloads(i int) *DirectLinkUpdateOne {
|
||||
dluo.mutation.ResetDownloads()
|
||||
dluo.mutation.SetDownloads(i)
|
||||
return dluo
|
||||
}
|
||||
|
||||
// SetNillableDownloads sets the "downloads" field if the given value is not nil.
|
||||
func (dluo *DirectLinkUpdateOne) SetNillableDownloads(i *int) *DirectLinkUpdateOne {
|
||||
if i != nil {
|
||||
dluo.SetDownloads(*i)
|
||||
}
|
||||
return dluo
|
||||
}
|
||||
|
||||
// AddDownloads adds i to the "downloads" field.
|
||||
func (dluo *DirectLinkUpdateOne) AddDownloads(i int) *DirectLinkUpdateOne {
|
||||
dluo.mutation.AddDownloads(i)
|
||||
return dluo
|
||||
}
|
||||
|
||||
// SetFileID sets the "file_id" field.
|
||||
func (dluo *DirectLinkUpdateOne) SetFileID(i int) *DirectLinkUpdateOne {
|
||||
dluo.mutation.SetFileID(i)
|
||||
return dluo
|
||||
}
|
||||
|
||||
// SetNillableFileID sets the "file_id" field if the given value is not nil.
|
||||
func (dluo *DirectLinkUpdateOne) SetNillableFileID(i *int) *DirectLinkUpdateOne {
|
||||
if i != nil {
|
||||
dluo.SetFileID(*i)
|
||||
}
|
||||
return dluo
|
||||
}
|
||||
|
||||
// SetSpeed sets the "speed" field.
|
||||
func (dluo *DirectLinkUpdateOne) SetSpeed(i int) *DirectLinkUpdateOne {
|
||||
dluo.mutation.ResetSpeed()
|
||||
dluo.mutation.SetSpeed(i)
|
||||
return dluo
|
||||
}
|
||||
|
||||
// SetNillableSpeed sets the "speed" field if the given value is not nil.
|
||||
func (dluo *DirectLinkUpdateOne) SetNillableSpeed(i *int) *DirectLinkUpdateOne {
|
||||
if i != nil {
|
||||
dluo.SetSpeed(*i)
|
||||
}
|
||||
return dluo
|
||||
}
|
||||
|
||||
// AddSpeed adds i to the "speed" field.
|
||||
func (dluo *DirectLinkUpdateOne) AddSpeed(i int) *DirectLinkUpdateOne {
|
||||
dluo.mutation.AddSpeed(i)
|
||||
return dluo
|
||||
}
|
||||
|
||||
// SetFile sets the "file" edge to the File entity.
|
||||
func (dluo *DirectLinkUpdateOne) SetFile(f *File) *DirectLinkUpdateOne {
|
||||
return dluo.SetFileID(f.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the DirectLinkMutation object of the builder.
|
||||
func (dluo *DirectLinkUpdateOne) Mutation() *DirectLinkMutation {
|
||||
return dluo.mutation
|
||||
}
|
||||
|
||||
// ClearFile clears the "file" edge to the File entity.
|
||||
func (dluo *DirectLinkUpdateOne) ClearFile() *DirectLinkUpdateOne {
|
||||
dluo.mutation.ClearFile()
|
||||
return dluo
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the DirectLinkUpdateOne builder.
|
||||
func (dluo *DirectLinkUpdateOne) Where(ps ...predicate.DirectLink) *DirectLinkUpdateOne {
|
||||
dluo.mutation.Where(ps...)
|
||||
return dluo
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (dluo *DirectLinkUpdateOne) Select(field string, fields ...string) *DirectLinkUpdateOne {
|
||||
dluo.fields = append([]string{field}, fields...)
|
||||
return dluo
|
||||
}
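// A single-entity update sketch returning only the selected column (assuming
// client and ctx, and that the client exposes the usual generated UpdateOneID):
//
//	dl, err := client.DirectLink.UpdateOneID(id).
//		AddDownloads(1).
//		Select(directlink.FieldDownloads).
//		Save(ctx)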
|
||||
|
||||
// Save executes the query and returns the updated DirectLink entity.
|
||||
func (dluo *DirectLinkUpdateOne) Save(ctx context.Context) (*DirectLink, error) {
|
||||
if err := dluo.defaults(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return withHooks(ctx, dluo.sqlSave, dluo.mutation, dluo.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (dluo *DirectLinkUpdateOne) SaveX(ctx context.Context) *DirectLink {
|
||||
node, err := dluo.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// Exec executes the query on the entity.
|
||||
func (dluo *DirectLinkUpdateOne) Exec(ctx context.Context) error {
|
||||
_, err := dluo.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (dluo *DirectLinkUpdateOne) ExecX(ctx context.Context) {
|
||||
if err := dluo.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (dluo *DirectLinkUpdateOne) defaults() error {
|
||||
if _, ok := dluo.mutation.UpdatedAt(); !ok {
|
||||
if directlink.UpdateDefaultUpdatedAt == nil {
|
||||
return fmt.Errorf("ent: uninitialized directlink.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
|
||||
}
|
||||
v := directlink.UpdateDefaultUpdatedAt()
|
||||
dluo.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (dluo *DirectLinkUpdateOne) check() error {
|
||||
if _, ok := dluo.mutation.FileID(); dluo.mutation.FileCleared() && !ok {
|
||||
return errors.New(`ent: clearing a required unique edge "DirectLink.file"`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dluo *DirectLinkUpdateOne) sqlSave(ctx context.Context) (_node *DirectLink, err error) {
|
||||
if err := dluo.check(); err != nil {
|
||||
return _node, err
|
||||
}
|
||||
_spec := sqlgraph.NewUpdateSpec(directlink.Table, directlink.Columns, sqlgraph.NewFieldSpec(directlink.FieldID, field.TypeInt))
|
||||
id, ok := dluo.mutation.ID()
|
||||
if !ok {
|
||||
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "DirectLink.id" for update`)}
|
||||
}
|
||||
_spec.Node.ID.Value = id
|
||||
if fields := dluo.fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, directlink.FieldID)
|
||||
for _, f := range fields {
|
||||
if !directlink.ValidColumn(f) {
|
||||
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
if f != directlink.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if ps := dluo.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := dluo.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(directlink.FieldUpdatedAt, field.TypeTime, value)
|
||||
}
|
||||
if value, ok := dluo.mutation.DeletedAt(); ok {
|
||||
_spec.SetField(directlink.FieldDeletedAt, field.TypeTime, value)
|
||||
}
|
||||
if dluo.mutation.DeletedAtCleared() {
|
||||
_spec.ClearField(directlink.FieldDeletedAt, field.TypeTime)
|
||||
}
|
||||
if value, ok := dluo.mutation.Name(); ok {
|
||||
_spec.SetField(directlink.FieldName, field.TypeString, value)
|
||||
}
|
||||
if value, ok := dluo.mutation.Downloads(); ok {
|
||||
_spec.SetField(directlink.FieldDownloads, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := dluo.mutation.AddedDownloads(); ok {
|
||||
_spec.AddField(directlink.FieldDownloads, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := dluo.mutation.Speed(); ok {
|
||||
_spec.SetField(directlink.FieldSpeed, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := dluo.mutation.AddedSpeed(); ok {
|
||||
_spec.AddField(directlink.FieldSpeed, field.TypeInt, value)
|
||||
}
|
||||
if dluo.mutation.FileCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: directlink.FileTable,
|
||||
Columns: []string{directlink.FileColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := dluo.mutation.FileIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: directlink.FileTable,
|
||||
Columns: []string{directlink.FileColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
_node = &DirectLink{config: dluo.config}
|
||||
_spec.Assign = _node.assignValues
|
||||
_spec.ScanValues = _node.scanValues
|
||||
if err = sqlgraph.UpdateNode(ctx, dluo.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{directlink.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
dluo.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
638
ent/ent.go
Normal file
@@ -0,0 +1,638 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/davaccount"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/directlink"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/entity"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/file"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/group"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/metadata"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/node"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/oauthclient"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/oauthgrant"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/passkey"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/share"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/user"
|
||||
)
|
||||
|
||||
// ent aliases to avoid import conflicts in user's code.
|
||||
type (
|
||||
Op = ent.Op
|
||||
Hook = ent.Hook
|
||||
Value = ent.Value
|
||||
Query = ent.Query
|
||||
QueryContext = ent.QueryContext
|
||||
Querier = ent.Querier
|
||||
QuerierFunc = ent.QuerierFunc
|
||||
Interceptor = ent.Interceptor
|
||||
InterceptFunc = ent.InterceptFunc
|
||||
Traverser = ent.Traverser
|
||||
TraverseFunc = ent.TraverseFunc
|
||||
Policy = ent.Policy
|
||||
Mutator = ent.Mutator
|
||||
Mutation = ent.Mutation
|
||||
MutateFunc = ent.MutateFunc
|
||||
)
|
||||
|
||||
type clientCtxKey struct{}
|
||||
|
||||
// FromContext returns a Client stored inside a context, or nil if there isn't one.
|
||||
func FromContext(ctx context.Context) *Client {
|
||||
c, _ := ctx.Value(clientCtxKey{}).(*Client)
|
||||
return c
|
||||
}
|
||||
|
||||
// NewContext returns a new context with the given Client attached.
|
||||
func NewContext(parent context.Context, c *Client) context.Context {
|
||||
return context.WithValue(parent, clientCtxKey{}, c)
|
||||
}
|
||||
|
||||
type txCtxKey struct{}
|
||||
|
||||
// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
|
||||
func TxFromContext(ctx context.Context) *Tx {
|
||||
tx, _ := ctx.Value(txCtxKey{}).(*Tx)
|
||||
return tx
|
||||
}
|
||||
|
||||
// NewTxContext returns a new context with the given Tx attached.
|
||||
func NewTxContext(parent context.Context, tx *Tx) context.Context {
|
||||
return context.WithValue(parent, txCtxKey{}, tx)
|
||||
}
|
||||
|
||||
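The two pairs of helpers above (NewContext/FromContext and NewTxContext/TxFromContext) let a caller carry the generated *Client or an open *Tx through a context.Context instead of threading it through every signature. A minimal sketch of how a service layer might use them; the package name, the WithClient helper, and the Setting query are illustrative assumptions, not part of the generated code:

	package service

	import (
		"context"
		"errors"

		"github.com/cloudreve/Cloudreve/v4/ent"
	)

	// WithClient attaches the ent client once, e.g. in an HTTP middleware.
	func WithClient(ctx context.Context, client *ent.Client) context.Context {
		return ent.NewContext(ctx, client)
	}

	// CountSettings retrieves the client further down the call chain.
	func CountSettings(ctx context.Context) (int, error) {
		client := ent.FromContext(ctx)
		if client == nil {
			return 0, errors.New("no ent client attached to context")
		}
		return client.Setting.Query().Count(ctx)
	}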
// OrderFunc applies an ordering on the sql selector.
|
||||
// Deprecated: Use Asc/Desc functions or the package builders instead.
|
||||
type OrderFunc func(*sql.Selector)
|
||||
|
||||
var (
|
||||
initCheck sync.Once
|
||||
columnCheck sql.ColumnCheck
|
||||
)
|
||||
|
||||
// checkColumn checks if the column exists in the given table.
|
||||
func checkColumn(table, column string) error {
|
||||
initCheck.Do(func() {
|
||||
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
|
||||
davaccount.Table: davaccount.ValidColumn,
|
||||
directlink.Table: directlink.ValidColumn,
|
||||
entity.Table: entity.ValidColumn,
|
||||
file.Table: file.ValidColumn,
|
||||
fsevent.Table: fsevent.ValidColumn,
|
||||
group.Table: group.ValidColumn,
|
||||
metadata.Table: metadata.ValidColumn,
|
||||
node.Table: node.ValidColumn,
|
||||
oauthclient.Table: oauthclient.ValidColumn,
|
||||
oauthgrant.Table: oauthgrant.ValidColumn,
|
||||
passkey.Table: passkey.ValidColumn,
|
||||
setting.Table: setting.ValidColumn,
|
||||
share.Table: share.ValidColumn,
|
||||
storagepolicy.Table: storagepolicy.ValidColumn,
|
||||
task.Table: task.ValidColumn,
|
||||
user.Table: user.ValidColumn,
|
||||
})
|
||||
})
|
||||
return columnCheck(table, column)
|
||||
}
|
||||
|
||||
// Asc applies the given fields in ASC order.
|
||||
func Asc(fields ...string) func(*sql.Selector) {
|
||||
return func(s *sql.Selector) {
|
||||
for _, f := range fields {
|
||||
if err := checkColumn(s.TableName(), f); err != nil {
|
||||
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
||||
}
|
||||
s.OrderBy(sql.Asc(s.C(f)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Desc applies the given fields in DESC order.
|
||||
func Desc(fields ...string) func(*sql.Selector) {
|
||||
return func(s *sql.Selector) {
|
||||
for _, f := range fields {
|
||||
if err := checkColumn(s.TableName(), f); err != nil {
|
||||
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
||||
}
|
||||
s.OrderBy(sql.Desc(s.C(f)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
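Asc and Desc produce selector modifiers that the generated query builders accept in their Order method. A hedged fragment, assuming client (*ent.Client) and ctx are in scope and the directlink package from this change is imported:

	// Most-downloaded links first, ties broken by name.
	links, err := client.DirectLink.Query().
		Order(ent.Desc(directlink.FieldDownloads), ent.Asc(directlink.FieldName)).
		All(ctx)
	if err != nil {
		// handle the query error
	}
	_ = links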
// AggregateFunc applies an aggregation step on the group-by traversal/selector.
|
||||
type AggregateFunc func(*sql.Selector) string
|
||||
|
||||
// As is a pseudo aggregation function for renaming other aggregation functions with custom names. For example:
|
||||
//
|
||||
// GroupBy(field1, field2).
|
||||
// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")).
|
||||
// Scan(ctx, &v)
|
||||
func As(fn AggregateFunc, end string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
return sql.As(fn(s), end)
|
||||
}
|
||||
}
|
||||
|
||||
// Count applies the "count" aggregation function on each group.
|
||||
func Count() AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
return sql.Count("*")
|
||||
}
|
||||
}
|
||||
|
||||
// Max applies the "max" aggregation function on the given field of each group.
|
||||
func Max(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
return sql.Max(s.C(field))
|
||||
}
|
||||
}
|
||||
|
||||
// Mean applies the "mean" aggregation function on the given field of each group.
|
||||
func Mean(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
return sql.Avg(s.C(field))
|
||||
}
|
||||
}
|
||||
|
||||
// Min applies the "min" aggregation function on the given field of each group.
|
||||
func Min(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
return sql.Min(s.C(field))
|
||||
}
|
||||
}
|
||||
|
||||
// Sum applies the "sum" aggregation function on the given field of each group.
|
||||
func Sum(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
return sql.Sum(s.C(field))
|
||||
}
|
||||
}
|
||||
|
||||
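These aggregation helpers plug into the generated GroupBy builders, following the pattern shown in the As doc comment above. A short sketch assuming client and ctx are in scope; the destination struct and the sum_size alias are illustrative:

	// Total stored bytes per creator; the json tags must match the selected columns.
	var rows []struct {
		CreatedBy int   `json:"created_by"`
		SumSize   int64 `json:"sum_size"`
	}
	err := client.Entity.Query().
		GroupBy(entity.FieldCreatedBy).
		Aggregate(ent.As(ent.Sum(entity.FieldSize), "sum_size")).
		Scan(ctx, &rows)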
// ValidationError returns when validating a field or edge fails.
|
||||
type ValidationError struct {
|
||||
Name string // Field or edge name.
|
||||
err error
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e *ValidationError) Error() string {
|
||||
return e.err.Error()
|
||||
}
|
||||
|
||||
// Unwrap implements the errors.Wrapper interface.
|
||||
func (e *ValidationError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// IsValidationError returns a boolean indicating whether the error is a validation error.
|
||||
func IsValidationError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var e *ValidationError
|
||||
return errors.As(err, &e)
|
||||
}
|
||||
|
||||
// NotFoundError returns when trying to fetch a specific entity and it was not found in the database.
|
||||
type NotFoundError struct {
|
||||
label string
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e *NotFoundError) Error() string {
|
||||
return "ent: " + e.label + " not found"
|
||||
}
|
||||
|
||||
// IsNotFound returns a boolean indicating whether the error is a not found error.
|
||||
func IsNotFound(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var e *NotFoundError
|
||||
return errors.As(err, &e)
|
||||
}
|
||||
|
||||
// MaskNotFound masks not found error.
|
||||
func MaskNotFound(err error) error {
|
||||
if IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
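IsNotFound and MaskNotFound are the usual way callers separate a missing row from a genuine failure when using the generated Get/Only methods. A brief sketch, assuming client, ctx, and an int id are in scope:

	u, err := client.User.Get(ctx, id)
	switch {
	case ent.IsNotFound(err):
		// the user does not exist; typically mapped to a 404 rather than a 500
	case err != nil:
		// unexpected database error
	default:
		_ = u // found
	}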
// NotSingularError returns when trying to fetch a singular entity and more than one was found in the database.
|
||||
type NotSingularError struct {
|
||||
label string
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e *NotSingularError) Error() string {
|
||||
return "ent: " + e.label + " not singular"
|
||||
}
|
||||
|
||||
// IsNotSingular returns a boolean indicating whether the error is a not singular error.
|
||||
func IsNotSingular(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var e *NotSingularError
|
||||
return errors.As(err, &e)
|
||||
}
|
||||
|
||||
// NotLoadedError returns when trying to get a node that was not loaded by the query.
|
||||
type NotLoadedError struct {
|
||||
edge string
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e *NotLoadedError) Error() string {
|
||||
return "ent: " + e.edge + " edge was not loaded"
|
||||
}
|
||||
|
||||
// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
|
||||
func IsNotLoaded(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var e *NotLoadedError
|
||||
return errors.As(err, &e)
|
||||
}
|
||||
|
||||
// ConstraintError returns when trying to create/update one or more entities and
|
||||
// one or more of their constraints failed. For example, violation of edge or
|
||||
// field uniqueness.
|
||||
type ConstraintError struct {
|
||||
msg string
|
||||
wrap error
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e ConstraintError) Error() string {
|
||||
return "ent: constraint failed: " + e.msg
|
||||
}
|
||||
|
||||
// Unwrap implements the errors.Wrapper interface.
|
||||
func (e *ConstraintError) Unwrap() error {
|
||||
return e.wrap
|
||||
}
|
||||
|
||||
// IsConstraintError returns a boolean indicating whether the error is a constraint failure.
|
||||
func IsConstraintError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var e *ConstraintError
|
||||
return errors.As(err, &e)
|
||||
}
|
||||
|
||||
// selector embedded by the different Select/GroupBy builders.
|
||||
type selector struct {
|
||||
label string
|
||||
flds *[]string
|
||||
fns []AggregateFunc
|
||||
scan func(context.Context, any) error
|
||||
}
|
||||
|
||||
// ScanX is like Scan, but panics if an error occurs.
|
||||
func (s *selector) ScanX(ctx context.Context, v any) {
|
||||
if err := s.scan(ctx, v); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Strings returns list of strings from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Strings(ctx context.Context) ([]string, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []string
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// StringsX is like Strings, but panics if an error occurs.
|
||||
func (s *selector) StringsX(ctx context.Context) []string {
|
||||
v, err := s.Strings(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// String returns a single string from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) String(ctx context.Context) (_ string, err error) {
|
||||
var v []string
|
||||
if v, err = s.Strings(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// StringX is like String, but panics if an error occurs.
|
||||
func (s *selector) StringX(ctx context.Context) string {
|
||||
v, err := s.String(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Ints returns list of ints from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Ints(ctx context.Context) ([]int, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []int
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// IntsX is like Ints, but panics if an error occurs.
|
||||
func (s *selector) IntsX(ctx context.Context) []int {
|
||||
v, err := s.Ints(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Int returns a single int from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Int(ctx context.Context) (_ int, err error) {
|
||||
var v []int
|
||||
if v, err = s.Ints(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// IntX is like Int, but panics if an error occurs.
|
||||
func (s *selector) IntX(ctx context.Context) int {
|
||||
v, err := s.Int(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Float64s returns list of float64s from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Float64s(ctx context.Context) ([]float64, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []float64
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// Float64sX is like Float64s, but panics if an error occurs.
|
||||
func (s *selector) Float64sX(ctx context.Context) []float64 {
|
||||
v, err := s.Float64s(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Float64 returns a single float64 from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Float64(ctx context.Context) (_ float64, err error) {
|
||||
var v []float64
|
||||
if v, err = s.Float64s(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Float64X is like Float64, but panics if an error occurs.
|
||||
func (s *selector) Float64X(ctx context.Context) float64 {
|
||||
v, err := s.Float64(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Bools returns list of bools from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Bools(ctx context.Context) ([]bool, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []bool
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// BoolsX is like Bools, but panics if an error occurs.
|
||||
func (s *selector) BoolsX(ctx context.Context) []bool {
|
||||
v, err := s.Bools(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Bool returns a single bool from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Bool(ctx context.Context) (_ bool, err error) {
|
||||
var v []bool
|
||||
if v, err = s.Bools(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// BoolX is like Bool, but panics if an error occurs.
|
||||
func (s *selector) BoolX(ctx context.Context) bool {
|
||||
v, err := s.Bool(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
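The selector helpers above back the generated Select builders and deliberately refuse to run when more than one field is selected. A fragment that respects that restriction, assuming client and ctx are in scope (ReferenceCountGT is the standard generated comparison predicate for the int field):

	// Source paths of all entities that are still referenced by at least one file.
	sources, err := client.Entity.Query().
		Where(entity.ReferenceCountGT(0)).
		Select(entity.FieldSource).
		Strings(ctx)
	if err != nil {
		// handle the query error
	}
	_ = sources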
// withHooks invokes the builder operation with the given hooks, if any.
|
||||
func withHooks[V Value, M any, PM interface {
|
||||
*M
|
||||
Mutation
|
||||
}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
|
||||
if len(hooks) == 0 {
|
||||
return exec(ctx)
|
||||
}
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutationT, ok := any(m).(PM)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
// Set the mutation to the builder.
|
||||
*mutation = *mutationT
|
||||
return exec(ctx)
|
||||
})
|
||||
for i := len(hooks) - 1; i >= 0; i-- {
|
||||
if hooks[i] == nil {
|
||||
return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = hooks[i](mut)
|
||||
}
|
||||
v, err := mut.Mutate(ctx, mutation)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
nv, ok := v.(V)
|
||||
if !ok {
|
||||
return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
|
||||
}
|
||||
return nv, nil
|
||||
}
|
||||
|
||||
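withHooks is the internal plumbing that runs every generated Save/Exec through the registered mutation hooks. The hooks themselves are attached on the client side; a minimal, hypothetical example that logs each DirectLink mutation, assuming the standard library log package is imported:

	client.DirectLink.Use(func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			log.Printf("directlink mutation: op=%v", m.Op())
			return next.Mutate(ctx, m)
		})
	})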
// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
|
||||
func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
|
||||
if ent.QueryFromContext(ctx) == nil {
|
||||
qc.Op = op
|
||||
ctx = ent.NewQueryContext(ctx, qc)
|
||||
}
|
||||
return ctx
|
||||
}
|
||||
|
||||
func querierAll[V Value, Q interface {
|
||||
sqlAll(context.Context, ...queryHook) (V, error)
|
||||
}]() Querier {
|
||||
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||
query, ok := q.(Q)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||
}
|
||||
return query.sqlAll(ctx)
|
||||
})
|
||||
}
|
||||
|
||||
func querierCount[Q interface {
|
||||
sqlCount(context.Context) (int, error)
|
||||
}]() Querier {
|
||||
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||
query, ok := q.(Q)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||
}
|
||||
return query.sqlCount(ctx)
|
||||
})
|
||||
}
|
||||
|
||||
func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
|
||||
for i := len(inters) - 1; i >= 0; i-- {
|
||||
qr = inters[i].Intercept(qr)
|
||||
}
|
||||
rv, err := qr.Query(ctx, q)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
vt, ok := rv.(V)
|
||||
if !ok {
|
||||
return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v)
|
||||
}
|
||||
return vt, nil
|
||||
}
|
||||
|
||||
func scanWithInterceptors[Q1 ent.Query, Q2 interface {
|
||||
sqlScan(context.Context, Q1, any) error
|
||||
}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
|
||||
rv := reflect.ValueOf(v)
|
||||
var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||
query, ok := q.(Q1)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||
}
|
||||
if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
|
||||
return rv.Elem().Interface(), nil
|
||||
}
|
||||
return v, nil
|
||||
})
|
||||
for i := len(inters) - 1; i >= 0; i-- {
|
||||
qr = inters[i].Intercept(qr)
|
||||
}
|
||||
vv, err := qr.Query(ctx, rootQuery)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch rv2 := reflect.ValueOf(vv); {
|
||||
case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
|
||||
case rv.Type() == rv2.Type():
|
||||
rv.Elem().Set(rv2.Elem())
|
||||
case rv.Elem().Type() == rv2.Type():
|
||||
rv.Elem().Set(rv2)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// queryHook describes an internal hook for the different sqlAll methods.
|
||||
type queryHook func(context.Context, *sqlgraph.QuerySpec)
|
||||
29
ent/entc.go
Normal file
@@ -0,0 +1,29 @@
|
||||
//go:build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"entgo.io/ent/entc"
|
||||
"entgo.io/ent/entc/gen"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := entc.Generate("./schema", &gen.Config{
|
||||
Features: []gen.Feature{
|
||||
gen.FeatureIntercept,
|
||||
gen.FeatureSnapshot,
|
||||
gen.FeatureUpsert,
|
||||
gen.FeatureUpsert,
|
||||
gen.FeatureExecQuery,
|
||||
},
|
||||
Templates: []*gen.Template{
|
||||
gen.MustParse(gen.NewTemplate("edge_helper").ParseFiles("templates/edgehelper.tmpl")),
|
||||
gen.MustParse(gen.NewTemplate("mutation_helper").ParseFiles("templates/mutationhelper.tmpl")),
|
||||
gen.MustParse(gen.NewTemplate("create_helper").ParseFiles("templates/createhelper.tmpl")),
|
||||
},
|
||||
}); err != nil {
|
||||
log.Fatal("running ent codegen:", err)
|
||||
}
|
||||
}
|
||||
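entc.go is guarded by the ignore build tag, so it never ends up in the runtime binary; it is meant to be driven by go generate. A conventional companion file would look like the sketch below — the file name and directive follow the usual ent layout and are an assumption, not something shown in this change:

	// ent/generate.go (hypothetical)
	package ent

	//go:generate go run -mod=mod entc.go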
317
ent/entity.go
Normal file
@@ -0,0 +1,317 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/entity"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/user"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
// Entity is the model entity for the Entity schema.
|
||||
type Entity struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int `json:"id,omitempty"`
|
||||
// CreatedAt holds the value of the "created_at" field.
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// UpdatedAt holds the value of the "updated_at" field.
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
// DeletedAt holds the value of the "deleted_at" field.
|
||||
DeletedAt *time.Time `json:"deleted_at,omitempty"`
|
||||
// Type holds the value of the "type" field.
|
||||
Type int `json:"type,omitempty"`
|
||||
// Source holds the value of the "source" field.
|
||||
Source string `json:"source,omitempty"`
|
||||
// Size holds the value of the "size" field.
|
||||
Size int64 `json:"size,omitempty"`
|
||||
// ReferenceCount holds the value of the "reference_count" field.
|
||||
ReferenceCount int `json:"reference_count,omitempty"`
|
||||
// StoragePolicyEntities holds the value of the "storage_policy_entities" field.
|
||||
StoragePolicyEntities int `json:"storage_policy_entities,omitempty"`
|
||||
// CreatedBy holds the value of the "created_by" field.
|
||||
CreatedBy int `json:"created_by,omitempty"`
|
||||
// UploadSessionID holds the value of the "upload_session_id" field.
|
||||
UploadSessionID *uuid.UUID `json:"upload_session_id,omitempty"`
|
||||
// Props holds the value of the "props" field.
|
||||
Props *types.EntityProps `json:"props,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the EntityQuery when eager-loading is set.
|
||||
Edges EntityEdges `json:"edges"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// EntityEdges holds the relations/edges for other nodes in the graph.
|
||||
type EntityEdges struct {
|
||||
// File holds the value of the file edge.
|
||||
File []*File `json:"file,omitempty"`
|
||||
// User holds the value of the user edge.
|
||||
User *User `json:"user,omitempty"`
|
||||
// StoragePolicy holds the value of the storage_policy edge.
|
||||
StoragePolicy *StoragePolicy `json:"storage_policy,omitempty"`
|
||||
// loadedTypes holds the information for reporting if a
|
||||
// type was loaded (or requested) in eager-loading or not.
|
||||
loadedTypes [3]bool
|
||||
}
|
||||
|
||||
// FileOrErr returns the File value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e EntityEdges) FileOrErr() ([]*File, error) {
|
||||
if e.loadedTypes[0] {
|
||||
return e.File, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "file"}
|
||||
}
|
||||
|
||||
// UserOrErr returns the User value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e EntityEdges) UserOrErr() (*User, error) {
|
||||
if e.loadedTypes[1] {
|
||||
if e.User == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: user.Label}
|
||||
}
|
||||
return e.User, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "user"}
|
||||
}
|
||||
|
||||
// StoragePolicyOrErr returns the StoragePolicy value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e EntityEdges) StoragePolicyOrErr() (*StoragePolicy, error) {
|
||||
if e.loadedTypes[2] {
|
||||
if e.StoragePolicy == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: storagepolicy.Label}
|
||||
}
|
||||
return e.StoragePolicy, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "storage_policy"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*Entity) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case entity.FieldUploadSessionID:
|
||||
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
|
||||
case entity.FieldProps:
|
||||
values[i] = new([]byte)
|
||||
case entity.FieldID, entity.FieldType, entity.FieldSize, entity.FieldReferenceCount, entity.FieldStoragePolicyEntities, entity.FieldCreatedBy:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case entity.FieldSource:
|
||||
values[i] = new(sql.NullString)
|
||||
case entity.FieldCreatedAt, entity.FieldUpdatedAt, entity.FieldDeletedAt:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the Entity fields.
|
||||
func (e *Entity) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case entity.FieldID:
|
||||
value, ok := values[i].(*sql.NullInt64)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected type %T for field id", value)
|
||||
}
|
||||
e.ID = int(value.Int64)
|
||||
case entity.FieldCreatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||
} else if value.Valid {
|
||||
e.CreatedAt = value.Time
|
||||
}
|
||||
case entity.FieldUpdatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||
} else if value.Valid {
|
||||
e.UpdatedAt = value.Time
|
||||
}
|
||||
case entity.FieldDeletedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
|
||||
} else if value.Valid {
|
||||
e.DeletedAt = new(time.Time)
|
||||
*e.DeletedAt = value.Time
|
||||
}
|
||||
case entity.FieldType:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field type", values[i])
|
||||
} else if value.Valid {
|
||||
e.Type = int(value.Int64)
|
||||
}
|
||||
case entity.FieldSource:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field source", values[i])
|
||||
} else if value.Valid {
|
||||
e.Source = value.String
|
||||
}
|
||||
case entity.FieldSize:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field size", values[i])
|
||||
} else if value.Valid {
|
||||
e.Size = value.Int64
|
||||
}
|
||||
case entity.FieldReferenceCount:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field reference_count", values[i])
|
||||
} else if value.Valid {
|
||||
e.ReferenceCount = int(value.Int64)
|
||||
}
|
||||
case entity.FieldStoragePolicyEntities:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field storage_policy_entities", values[i])
|
||||
} else if value.Valid {
|
||||
e.StoragePolicyEntities = int(value.Int64)
|
||||
}
|
||||
case entity.FieldCreatedBy:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field created_by", values[i])
|
||||
} else if value.Valid {
|
||||
e.CreatedBy = int(value.Int64)
|
||||
}
|
||||
case entity.FieldUploadSessionID:
|
||||
if value, ok := values[i].(*sql.NullScanner); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field upload_session_id", values[i])
|
||||
} else if value.Valid {
|
||||
e.UploadSessionID = new(uuid.UUID)
|
||||
*e.UploadSessionID = *value.S.(*uuid.UUID)
|
||||
}
|
||||
case entity.FieldProps:
|
||||
if value, ok := values[i].(*[]byte); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field props", values[i])
|
||||
} else if value != nil && len(*value) > 0 {
|
||||
if err := json.Unmarshal(*value, &e.Props); err != nil {
|
||||
return fmt.Errorf("unmarshal field props: %w", err)
|
||||
}
|
||||
}
|
||||
default:
|
||||
e.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the Entity.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (e *Entity) Value(name string) (ent.Value, error) {
|
||||
return e.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryFile queries the "file" edge of the Entity entity.
|
||||
func (e *Entity) QueryFile() *FileQuery {
|
||||
return NewEntityClient(e.config).QueryFile(e)
|
||||
}
|
||||
|
||||
// QueryUser queries the "user" edge of the Entity entity.
|
||||
func (e *Entity) QueryUser() *UserQuery {
|
||||
return NewEntityClient(e.config).QueryUser(e)
|
||||
}
|
||||
|
||||
// QueryStoragePolicy queries the "storage_policy" edge of the Entity entity.
|
||||
func (e *Entity) QueryStoragePolicy() *StoragePolicyQuery {
|
||||
return NewEntityClient(e.config).QueryStoragePolicy(e)
|
||||
}
|
||||
|
||||
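QueryFile, QueryUser and QueryStoragePolicy traverse edges starting from an Entity that is already loaded; loading edges together with the entities is done through the generated With* eager-loading options instead. A hedged sketch, assuming client and ctx are in scope (WithUser and WithStoragePolicy are the standard eager-load methods ent generates for these two edges):

	entities, err := client.Entity.Query().
		WithUser().
		WithStoragePolicy().
		All(ctx)
	if err == nil && len(entities) > 0 {
		// UserOrErr succeeds here only because WithUser marked the edge as loaded.
		owner, _ := entities[0].Edges.UserOrErr()
		_ = owner
	}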
// Update returns a builder for updating this Entity.
|
||||
// Note that you need to call Entity.Unwrap() before calling this method if this Entity
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (e *Entity) Update() *EntityUpdateOne {
|
||||
return NewEntityClient(e.config).UpdateOne(e)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the Entity entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (e *Entity) Unwrap() *Entity {
|
||||
_tx, ok := e.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: Entity is not a transactional entity")
|
||||
}
|
||||
e.config.driver = _tx.drv
|
||||
return e
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (e *Entity) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("Entity(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", e.ID))
|
||||
builder.WriteString("created_at=")
|
||||
builder.WriteString(e.CreatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("updated_at=")
|
||||
builder.WriteString(e.UpdatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
if v := e.DeletedAt; v != nil {
|
||||
builder.WriteString("deleted_at=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("type=")
|
||||
builder.WriteString(fmt.Sprintf("%v", e.Type))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("source=")
|
||||
builder.WriteString(e.Source)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("size=")
|
||||
builder.WriteString(fmt.Sprintf("%v", e.Size))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("reference_count=")
|
||||
builder.WriteString(fmt.Sprintf("%v", e.ReferenceCount))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("storage_policy_entities=")
|
||||
builder.WriteString(fmt.Sprintf("%v", e.StoragePolicyEntities))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("created_by=")
|
||||
builder.WriteString(fmt.Sprintf("%v", e.CreatedBy))
|
||||
builder.WriteString(", ")
|
||||
if v := e.UploadSessionID; v != nil {
|
||||
builder.WriteString("upload_session_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("props=")
|
||||
builder.WriteString(fmt.Sprintf("%v", e.Props))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// SetFile manually sets the file edge and marks it as loaded.
|
||||
func (e *Entity) SetFile(v []*File) {
|
||||
e.Edges.File = v
|
||||
e.Edges.loadedTypes[0] = true
|
||||
}
|
||||
|
||||
// SetUser manually sets the user edge and marks it as loaded.
|
||||
func (e *Entity) SetUser(v *User) {
|
||||
e.Edges.User = v
|
||||
e.Edges.loadedTypes[1] = true
|
||||
}
|
||||
|
||||
// SetStoragePolicy manually sets the storage_policy edge and marks it as loaded.
|
||||
func (e *Entity) SetStoragePolicy(v *StoragePolicy) {
|
||||
e.Edges.StoragePolicy = v
|
||||
e.Edges.loadedTypes[2] = true
|
||||
}
|
||||
|
||||
// Entities is a parsable slice of Entity.
|
||||
type Entities []*Entity
|
||||
224
ent/entity/entity.go
Normal file
@@ -0,0 +1,224 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package entity
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
|
||||
// Label holds the string label denoting the entity type in the database.
|
||||
Label = "entity"
|
||||
// FieldID holds the string denoting the id field in the database.
|
||||
FieldID = "id"
|
||||
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||
FieldCreatedAt = "created_at"
|
||||
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||
FieldUpdatedAt = "updated_at"
|
||||
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
|
||||
FieldDeletedAt = "deleted_at"
|
||||
// FieldType holds the string denoting the type field in the database.
|
||||
FieldType = "type"
|
||||
// FieldSource holds the string denoting the source field in the database.
|
||||
FieldSource = "source"
|
||||
// FieldSize holds the string denoting the size field in the database.
|
||||
FieldSize = "size"
|
||||
// FieldReferenceCount holds the string denoting the reference_count field in the database.
|
||||
FieldReferenceCount = "reference_count"
|
||||
// FieldStoragePolicyEntities holds the string denoting the storage_policy_entities field in the database.
|
||||
FieldStoragePolicyEntities = "storage_policy_entities"
|
||||
// FieldCreatedBy holds the string denoting the created_by field in the database.
|
||||
FieldCreatedBy = "created_by"
|
||||
// FieldUploadSessionID holds the string denoting the upload_session_id field in the database.
|
||||
FieldUploadSessionID = "upload_session_id"
|
||||
// FieldProps holds the string denoting the props field in the database.
|
||||
FieldProps = "recycle_options"
|
||||
// EdgeFile holds the string denoting the file edge name in mutations.
|
||||
EdgeFile = "file"
|
||||
// EdgeUser holds the string denoting the user edge name in mutations.
|
||||
EdgeUser = "user"
|
||||
// EdgeStoragePolicy holds the string denoting the storage_policy edge name in mutations.
|
||||
EdgeStoragePolicy = "storage_policy"
|
||||
// Table holds the table name of the entity in the database.
|
||||
Table = "entities"
|
||||
// FileTable is the table that holds the file relation/edge. The primary key declared below.
|
||||
FileTable = "file_entities"
|
||||
// FileInverseTable is the table name for the File entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "file" package.
|
||||
FileInverseTable = "files"
|
||||
// UserTable is the table that holds the user relation/edge.
|
||||
UserTable = "entities"
|
||||
// UserInverseTable is the table name for the User entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "user" package.
|
||||
UserInverseTable = "users"
|
||||
// UserColumn is the table column denoting the user relation/edge.
|
||||
UserColumn = "created_by"
|
||||
// StoragePolicyTable is the table that holds the storage_policy relation/edge.
|
||||
StoragePolicyTable = "entities"
|
||||
// StoragePolicyInverseTable is the table name for the StoragePolicy entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "storagepolicy" package.
|
||||
StoragePolicyInverseTable = "storage_policies"
|
||||
// StoragePolicyColumn is the table column denoting the storage_policy relation/edge.
|
||||
StoragePolicyColumn = "storage_policy_entities"
|
||||
)
|
||||
|
||||
// Columns holds all SQL columns for entity fields.
|
||||
var Columns = []string{
|
||||
FieldID,
|
||||
FieldCreatedAt,
|
||||
FieldUpdatedAt,
|
||||
FieldDeletedAt,
|
||||
FieldType,
|
||||
FieldSource,
|
||||
FieldSize,
|
||||
FieldReferenceCount,
|
||||
FieldStoragePolicyEntities,
|
||||
FieldCreatedBy,
|
||||
FieldUploadSessionID,
|
||||
FieldProps,
|
||||
}
|
||||
|
||||
var (
|
||||
// FilePrimaryKey and FileColumn2 are the table columns denoting the
|
||||
// primary key for the file relation (M2M).
|
||||
FilePrimaryKey = []string{"file_id", "entity_id"}
|
||||
)
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Note that the variables below are initialized by the runtime
|
||||
// package on the initialization of the application. Therefore,
|
||||
// it should be imported in the main as follows:
|
||||
//
|
||||
// import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
|
||||
var (
|
||||
Hooks [1]ent.Hook
|
||||
Interceptors [1]ent.Interceptor
|
||||
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||
DefaultCreatedAt func() time.Time
|
||||
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||
DefaultUpdatedAt func() time.Time
|
||||
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||
UpdateDefaultUpdatedAt func() time.Time
|
||||
// DefaultReferenceCount holds the default value on creation for the "reference_count" field.
|
||||
DefaultReferenceCount int
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the Entity queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedAt orders the results by the created_at field.
|
||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUpdatedAt orders the results by the updated_at field.
|
||||
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByDeletedAt orders the results by the deleted_at field.
|
||||
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByType orders the results by the type field.
|
||||
func ByType(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldType, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySource orders the results by the source field.
|
||||
func BySource(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSource, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySize orders the results by the size field.
|
||||
func BySize(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSize, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByReferenceCount orders the results by the reference_count field.
|
||||
func ByReferenceCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldReferenceCount, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByStoragePolicyEntities orders the results by the storage_policy_entities field.
|
||||
func ByStoragePolicyEntities(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldStoragePolicyEntities, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedBy orders the results by the created_by field.
|
||||
func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedBy, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUploadSessionID orders the results by the upload_session_id field.
|
||||
func ByUploadSessionID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUploadSessionID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByFileCount orders the results by file count.
|
||||
func ByFileCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newFileStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByFile orders the results by file terms.
|
||||
func ByFile(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newFileStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByUserField orders the results by user field.
|
||||
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
|
||||
// ByStoragePolicyField orders the results by storage_policy field.
|
||||
func ByStoragePolicyField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newStoragePolicyStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
func newFileStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(FileInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2M, true, FileTable, FilePrimaryKey...),
|
||||
)
|
||||
}
|
||||
func newUserStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(UserInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||
)
|
||||
}
|
||||
func newStoragePolicyStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(StoragePolicyInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, StoragePolicyTable, StoragePolicyColumn),
|
||||
)
|
||||
}
|
||||
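The OrderOption helpers above compose directly with the generated EntityQuery. A hedged fragment, assuming client and ctx are in scope and entgo.io/ent/dialect/sql is imported as sql:

	// Entities referenced by the most files first, ties broken by size.
	entities, err := client.Entity.Query().
		Order(entity.ByFileCount(sql.OrderDesc()), entity.BySize()).
		All(ctx)
	if err != nil {
		// handle the query error
	}
	_ = entities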
616
ent/entity/where.go
Normal file
@@ -0,0 +1,616 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package entity
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
// ID filters vertices based on their ID field.
|
||||
func ID(id int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDEQ applies the EQ predicate on the ID field.
|
||||
func IDEQ(id int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDNEQ applies the NEQ predicate on the ID field.
|
||||
func IDNEQ(id int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDIn applies the In predicate on the ID field.
|
||||
func IDIn(ids ...int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDNotIn applies the NotIn predicate on the ID field.
|
||||
func IDNotIn(ids ...int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDGT applies the GT predicate on the ID field.
|
||||
func IDGT(id int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDGTE applies the GTE predicate on the ID field.
|
||||
func IDGTE(id int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGTE(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLT applies the LT predicate on the ID field.
|
||||
func IDLT(id int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLTE applies the LTE predicate on the ID field.
|
||||
func IDLTE(id int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLTE(FieldID, id))
|
||||
}
|
||||
|
||||
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||
func CreatedAt(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||
func UpdatedAt(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
|
||||
func DeletedAt(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// Type applies equality check predicate on the "type" field. It's identical to TypeEQ.
|
||||
func Type(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldType, v))
|
||||
}
|
||||
|
||||
// Source applies equality check predicate on the "source" field. It's identical to SourceEQ.
|
||||
func Source(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldSource, v))
|
||||
}
|
||||
|
||||
// Size applies equality check predicate on the "size" field. It's identical to SizeEQ.
|
||||
func Size(v int64) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldSize, v))
|
||||
}
|
||||
|
||||
// ReferenceCount applies equality check predicate on the "reference_count" field. It's identical to ReferenceCountEQ.
|
||||
func ReferenceCount(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldReferenceCount, v))
|
||||
}
|
||||
|
||||
// StoragePolicyEntities applies equality check predicate on the "storage_policy_entities" field. It's identical to StoragePolicyEntitiesEQ.
|
||||
func StoragePolicyEntities(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldStoragePolicyEntities, v))
|
||||
}
|
||||
|
||||
// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ.
|
||||
func CreatedBy(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// UploadSessionID applies equality check predicate on the "upload_session_id" field. It's identical to UploadSessionIDEQ.
|
||||
func UploadSessionID(v uuid.UUID) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldUploadSessionID, v))
|
||||
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||
func CreatedAtEQ(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||
func CreatedAtNEQ(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||
func CreatedAtIn(vs ...time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||
func CreatedAtNotIn(vs ...time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||
func CreatedAtGT(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||
func CreatedAtGTE(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||
func CreatedAtLT(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||
func CreatedAtLTE(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||
func UpdatedAtEQ(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||
func UpdatedAtNEQ(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||
func UpdatedAtIn(vs ...time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||
func UpdatedAtNotIn(vs ...time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||
func UpdatedAtGT(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||
func UpdatedAtGTE(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||
func UpdatedAtLT(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||
func UpdatedAtLTE(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
|
||||
func DeletedAtEQ(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
|
||||
func DeletedAtNEQ(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNEQ(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtIn applies the In predicate on the "deleted_at" field.
|
||||
func DeletedAtIn(vs ...time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIn(FieldDeletedAt, vs...))
|
||||
}
|
||||
|
||||
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
|
||||
func DeletedAtNotIn(vs ...time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotIn(FieldDeletedAt, vs...))
|
||||
}
|
||||
|
||||
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
|
||||
func DeletedAtGT(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGT(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
|
||||
func DeletedAtGTE(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGTE(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
|
||||
func DeletedAtLT(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLT(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
|
||||
func DeletedAtLTE(v time.Time) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLTE(FieldDeletedAt, v))
|
||||
}
|
||||
|
||||
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
|
||||
func DeletedAtIsNil() predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIsNull(FieldDeletedAt))
|
||||
}
|
||||
|
||||
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
|
||||
func DeletedAtNotNil() predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotNull(FieldDeletedAt))
|
||||
}
|
||||
|
||||
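Because deleted_at is nullable, DeletedAtIsNil/DeletedAtNotNil give the usual soft-delete filter: live rows are those where the column is NULL. A small sketch, assuming client and ctx are in scope:

	// Fetch only entities that have not been soft-deleted.
	live, err := client.Entity.Query().
		Where(entity.DeletedAtIsNil()).
		All(ctx)
	if err != nil {
		// handle the query error
	}
	_ = live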
// TypeEQ applies the EQ predicate on the "type" field.
|
||||
func TypeEQ(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldType, v))
|
||||
}
|
||||
|
||||
// TypeNEQ applies the NEQ predicate on the "type" field.
|
||||
func TypeNEQ(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNEQ(FieldType, v))
|
||||
}
|
||||
|
||||
// TypeIn applies the In predicate on the "type" field.
|
||||
func TypeIn(vs ...int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIn(FieldType, vs...))
|
||||
}
|
||||
|
||||
// TypeNotIn applies the NotIn predicate on the "type" field.
|
||||
func TypeNotIn(vs ...int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotIn(FieldType, vs...))
|
||||
}
|
||||
|
||||
// TypeGT applies the GT predicate on the "type" field.
|
||||
func TypeGT(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGT(FieldType, v))
|
||||
}
|
||||
|
||||
// TypeGTE applies the GTE predicate on the "type" field.
|
||||
func TypeGTE(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGTE(FieldType, v))
|
||||
}
|
||||
|
||||
// TypeLT applies the LT predicate on the "type" field.
|
||||
func TypeLT(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLT(FieldType, v))
|
||||
}
|
||||
|
||||
// TypeLTE applies the LTE predicate on the "type" field.
|
||||
func TypeLTE(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLTE(FieldType, v))
|
||||
}
|
||||
|
||||
// SourceEQ applies the EQ predicate on the "source" field.
|
||||
func SourceEQ(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldSource, v))
|
||||
}
|
||||
|
||||
// SourceNEQ applies the NEQ predicate on the "source" field.
|
||||
func SourceNEQ(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNEQ(FieldSource, v))
|
||||
}
|
||||
|
||||
// SourceIn applies the In predicate on the "source" field.
|
||||
func SourceIn(vs ...string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIn(FieldSource, vs...))
|
||||
}
|
||||
|
||||
// SourceNotIn applies the NotIn predicate on the "source" field.
|
||||
func SourceNotIn(vs ...string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotIn(FieldSource, vs...))
|
||||
}
|
||||
|
||||
// SourceGT applies the GT predicate on the "source" field.
|
||||
func SourceGT(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGT(FieldSource, v))
|
||||
}
|
||||
|
||||
// SourceGTE applies the GTE predicate on the "source" field.
|
||||
func SourceGTE(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGTE(FieldSource, v))
|
||||
}
|
||||
|
||||
// SourceLT applies the LT predicate on the "source" field.
|
||||
func SourceLT(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLT(FieldSource, v))
|
||||
}
|
||||
|
||||
// SourceLTE applies the LTE predicate on the "source" field.
|
||||
func SourceLTE(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLTE(FieldSource, v))
|
||||
}
|
||||
|
||||
// SourceContains applies the Contains predicate on the "source" field.
|
||||
func SourceContains(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldContains(FieldSource, v))
|
||||
}
|
||||
|
||||
// SourceHasPrefix applies the HasPrefix predicate on the "source" field.
|
||||
func SourceHasPrefix(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldHasPrefix(FieldSource, v))
|
||||
}
|
||||
|
||||
// SourceHasSuffix applies the HasSuffix predicate on the "source" field.
|
||||
func SourceHasSuffix(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldHasSuffix(FieldSource, v))
|
||||
}
|
||||
|
||||
// SourceEqualFold applies the EqualFold predicate on the "source" field.
|
||||
func SourceEqualFold(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEqualFold(FieldSource, v))
|
||||
}
|
||||
|
||||
// SourceContainsFold applies the ContainsFold predicate on the "source" field.
|
||||
func SourceContainsFold(v string) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldContainsFold(FieldSource, v))
|
||||
}
|
||||
|
||||
// SizeEQ applies the EQ predicate on the "size" field.
|
||||
func SizeEQ(v int64) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldSize, v))
|
||||
}
|
||||
|
||||
// SizeNEQ applies the NEQ predicate on the "size" field.
|
||||
func SizeNEQ(v int64) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNEQ(FieldSize, v))
|
||||
}
|
||||
|
||||
// SizeIn applies the In predicate on the "size" field.
|
||||
func SizeIn(vs ...int64) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIn(FieldSize, vs...))
|
||||
}
|
||||
|
||||
// SizeNotIn applies the NotIn predicate on the "size" field.
|
||||
func SizeNotIn(vs ...int64) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotIn(FieldSize, vs...))
|
||||
}
|
||||
|
||||
// SizeGT applies the GT predicate on the "size" field.
|
||||
func SizeGT(v int64) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGT(FieldSize, v))
|
||||
}
|
||||
|
||||
// SizeGTE applies the GTE predicate on the "size" field.
|
||||
func SizeGTE(v int64) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGTE(FieldSize, v))
|
||||
}
|
||||
|
||||
// SizeLT applies the LT predicate on the "size" field.
|
||||
func SizeLT(v int64) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLT(FieldSize, v))
|
||||
}
|
||||
|
||||
// SizeLTE applies the LTE predicate on the "size" field.
|
||||
func SizeLTE(v int64) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLTE(FieldSize, v))
|
||||
}
|
||||
|
||||
// ReferenceCountEQ applies the EQ predicate on the "reference_count" field.
|
||||
func ReferenceCountEQ(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldReferenceCount, v))
|
||||
}
|
||||
|
||||
// ReferenceCountNEQ applies the NEQ predicate on the "reference_count" field.
|
||||
func ReferenceCountNEQ(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNEQ(FieldReferenceCount, v))
|
||||
}
|
||||
|
||||
// ReferenceCountIn applies the In predicate on the "reference_count" field.
|
||||
func ReferenceCountIn(vs ...int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIn(FieldReferenceCount, vs...))
|
||||
}
|
||||
|
||||
// ReferenceCountNotIn applies the NotIn predicate on the "reference_count" field.
|
||||
func ReferenceCountNotIn(vs ...int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotIn(FieldReferenceCount, vs...))
|
||||
}
|
||||
|
||||
// ReferenceCountGT applies the GT predicate on the "reference_count" field.
|
||||
func ReferenceCountGT(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGT(FieldReferenceCount, v))
|
||||
}
|
||||
|
||||
// ReferenceCountGTE applies the GTE predicate on the "reference_count" field.
|
||||
func ReferenceCountGTE(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGTE(FieldReferenceCount, v))
|
||||
}
|
||||
|
||||
// ReferenceCountLT applies the LT predicate on the "reference_count" field.
|
||||
func ReferenceCountLT(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLT(FieldReferenceCount, v))
|
||||
}
|
||||
|
||||
// ReferenceCountLTE applies the LTE predicate on the "reference_count" field.
|
||||
func ReferenceCountLTE(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLTE(FieldReferenceCount, v))
|
||||
}
|
||||
|
||||
// StoragePolicyEntitiesEQ applies the EQ predicate on the "storage_policy_entities" field.
|
||||
func StoragePolicyEntitiesEQ(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldStoragePolicyEntities, v))
|
||||
}
|
||||
|
||||
// StoragePolicyEntitiesNEQ applies the NEQ predicate on the "storage_policy_entities" field.
|
||||
func StoragePolicyEntitiesNEQ(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNEQ(FieldStoragePolicyEntities, v))
|
||||
}
|
||||
|
||||
// StoragePolicyEntitiesIn applies the In predicate on the "storage_policy_entities" field.
|
||||
func StoragePolicyEntitiesIn(vs ...int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIn(FieldStoragePolicyEntities, vs...))
|
||||
}
|
||||
|
||||
// StoragePolicyEntitiesNotIn applies the NotIn predicate on the "storage_policy_entities" field.
|
||||
func StoragePolicyEntitiesNotIn(vs ...int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotIn(FieldStoragePolicyEntities, vs...))
|
||||
}
|
||||
|
||||
// CreatedByEQ applies the EQ predicate on the "created_by" field.
|
||||
func CreatedByEQ(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByNEQ applies the NEQ predicate on the "created_by" field.
|
||||
func CreatedByNEQ(v int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNEQ(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByIn applies the In predicate on the "created_by" field.
|
||||
func CreatedByIn(vs ...int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIn(FieldCreatedBy, vs...))
|
||||
}
|
||||
|
||||
// CreatedByNotIn applies the NotIn predicate on the "created_by" field.
|
||||
func CreatedByNotIn(vs ...int) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotIn(FieldCreatedBy, vs...))
|
||||
}
|
||||
|
||||
// CreatedByIsNil applies the IsNil predicate on the "created_by" field.
|
||||
func CreatedByIsNil() predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIsNull(FieldCreatedBy))
|
||||
}
|
||||
|
||||
// CreatedByNotNil applies the NotNil predicate on the "created_by" field.
|
||||
func CreatedByNotNil() predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotNull(FieldCreatedBy))
|
||||
}
|
||||
|
||||
// UploadSessionIDEQ applies the EQ predicate on the "upload_session_id" field.
|
||||
func UploadSessionIDEQ(v uuid.UUID) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldEQ(FieldUploadSessionID, v))
|
||||
}
|
||||
|
||||
// UploadSessionIDNEQ applies the NEQ predicate on the "upload_session_id" field.
|
||||
func UploadSessionIDNEQ(v uuid.UUID) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNEQ(FieldUploadSessionID, v))
|
||||
}
|
||||
|
||||
// UploadSessionIDIn applies the In predicate on the "upload_session_id" field.
|
||||
func UploadSessionIDIn(vs ...uuid.UUID) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIn(FieldUploadSessionID, vs...))
|
||||
}
|
||||
|
||||
// UploadSessionIDNotIn applies the NotIn predicate on the "upload_session_id" field.
|
||||
func UploadSessionIDNotIn(vs ...uuid.UUID) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotIn(FieldUploadSessionID, vs...))
|
||||
}
|
||||
|
||||
// UploadSessionIDGT applies the GT predicate on the "upload_session_id" field.
|
||||
func UploadSessionIDGT(v uuid.UUID) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGT(FieldUploadSessionID, v))
|
||||
}
|
||||
|
||||
// UploadSessionIDGTE applies the GTE predicate on the "upload_session_id" field.
|
||||
func UploadSessionIDGTE(v uuid.UUID) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldGTE(FieldUploadSessionID, v))
|
||||
}
|
||||
|
||||
// UploadSessionIDLT applies the LT predicate on the "upload_session_id" field.
|
||||
func UploadSessionIDLT(v uuid.UUID) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLT(FieldUploadSessionID, v))
|
||||
}
|
||||
|
||||
// UploadSessionIDLTE applies the LTE predicate on the "upload_session_id" field.
|
||||
func UploadSessionIDLTE(v uuid.UUID) predicate.Entity {
|
||||
return predicate.Entity(sql.FieldLTE(FieldUploadSessionID, v))
|
||||
}
|
||||
|
||||
// UploadSessionIDIsNil applies the IsNil predicate on the "upload_session_id" field.
|
||||
func UploadSessionIDIsNil() predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIsNull(FieldUploadSessionID))
|
||||
}
|
||||
|
||||
// UploadSessionIDNotNil applies the NotNil predicate on the "upload_session_id" field.
|
||||
func UploadSessionIDNotNil() predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotNull(FieldUploadSessionID))
|
||||
}
|
||||
|
||||
// PropsIsNil applies the IsNil predicate on the "props" field.
|
||||
func PropsIsNil() predicate.Entity {
|
||||
return predicate.Entity(sql.FieldIsNull(FieldProps))
|
||||
}
|
||||
|
||||
// PropsNotNil applies the NotNil predicate on the "props" field.
|
||||
func PropsNotNil() predicate.Entity {
|
||||
return predicate.Entity(sql.FieldNotNull(FieldProps))
|
||||
}

// HasFile applies the HasEdge predicate on the "file" edge.
func HasFile() predicate.Entity {
	return predicate.Entity(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2M, true, FileTable, FilePrimaryKey...),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasFileWith applies the HasEdge predicate on the "file" edge with given conditions (other predicates).
func HasFileWith(preds ...predicate.File) predicate.Entity {
	return predicate.Entity(func(s *sql.Selector) {
		step := newFileStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasUser applies the HasEdge predicate on the "user" edge.
func HasUser() predicate.Entity {
	return predicate.Entity(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasUserWith applies the HasEdge predicate on the "user" edge with given conditions (other predicates).
func HasUserWith(preds ...predicate.User) predicate.Entity {
	return predicate.Entity(func(s *sql.Selector) {
		step := newUserStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasStoragePolicy applies the HasEdge predicate on the "storage_policy" edge.
func HasStoragePolicy() predicate.Entity {
	return predicate.Entity(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, StoragePolicyTable, StoragePolicyColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasStoragePolicyWith applies the HasEdge predicate on the "storage_policy" edge with given conditions (other predicates).
func HasStoragePolicyWith(preds ...predicate.StoragePolicy) predicate.Entity {
	return predicate.Entity(func(s *sql.Selector) {
		step := newStoragePolicyStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Entity) predicate.Entity {
	return predicate.Entity(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Entity) predicate.Entity {
	return predicate.Entity(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.Entity) predicate.Entity {
	return predicate.Entity(sql.NotPredicates(p))
}
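// Editor's note: the sketch below is illustrative and not part of the generated file.
// It shows how the field predicates and the And/Or/Not combinators defined above are
// typically composed through the generated client; "client" (*ent.Client), "ctx"
// (a context.Context) and the chosen conditions are assumptions for the example.
//
//	stale, err := client.Entity.Query().
//		Where(
//			entity.Or(
//				entity.UploadSessionIDNotNil(),
//				entity.ReferenceCountLTE(0),
//			),
//			entity.Not(entity.DeletedAtNotNil()),
//			entity.UpdatedAtLT(time.Now().Add(-24*time.Hour)),
//		).
//		All(ctx)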
1267
ent/entity_create.go
Normal file
File diff suppressed because it is too large
88
ent/entity_delete.go
Normal file
@@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"github.com/cloudreve/Cloudreve/v4/ent/entity"
	"github.com/cloudreve/Cloudreve/v4/ent/predicate"
)

// EntityDelete is the builder for deleting an Entity entity.
type EntityDelete struct {
	config
	hooks    []Hook
	mutation *EntityMutation
}

// Where appends a list of predicates to the EntityDelete builder.
func (ed *EntityDelete) Where(ps ...predicate.Entity) *EntityDelete {
	ed.mutation.Where(ps...)
	return ed
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (ed *EntityDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, ed.sqlExec, ed.mutation, ed.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (ed *EntityDelete) ExecX(ctx context.Context) int {
	n, err := ed.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

func (ed *EntityDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(entity.Table, sqlgraph.NewFieldSpec(entity.FieldID, field.TypeInt))
	if ps := ed.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, ed.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	ed.mutation.done = true
	return affected, err
}

// EntityDeleteOne is the builder for deleting a single Entity entity.
type EntityDeleteOne struct {
	ed *EntityDelete
}

// Where appends a list of predicates to the EntityDelete builder.
func (edo *EntityDeleteOne) Where(ps ...predicate.Entity) *EntityDeleteOne {
	edo.ed.mutation.Where(ps...)
	return edo
}

// Exec executes the deletion query.
func (edo *EntityDeleteOne) Exec(ctx context.Context) error {
	n, err := edo.ed.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{entity.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (edo *EntityDeleteOne) ExecX(ctx context.Context) {
	if err := edo.Exec(ctx); err != nil {
		panic(err)
	}
}
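// Editor's note: an illustrative sketch, not part of the generated file, showing how
// the EntityDelete builder above is typically reached through the generated client.
// "client" (*ent.Client), "ctx" and the predicate used are assumptions for the example.
//
//	// Bulk-delete unreferenced entities and report how many rows were removed.
//	n, err := client.Entity.Delete().
//		Where(entity.ReferenceCountLTE(0)).
//		Exec(ctx)
//	if err != nil {
//		return fmt.Errorf("deleting unreferenced entities: %w", err)
//	}
//	log.Printf("removed %d unreferenced entities", n)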
786
ent/entity_query.go
Normal file
@@ -0,0 +1,786 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/entity"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/file"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/predicate"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/user"
|
||||
)
|
||||
|
||||
// EntityQuery is the builder for querying Entity entities.
|
||||
type EntityQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []entity.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.Entity
|
||||
withFile *FileQuery
|
||||
withUser *UserQuery
|
||||
withStoragePolicy *StoragePolicyQuery
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the EntityQuery builder.
|
||||
func (eq *EntityQuery) Where(ps ...predicate.Entity) *EntityQuery {
|
||||
eq.predicates = append(eq.predicates, ps...)
|
||||
return eq
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (eq *EntityQuery) Limit(limit int) *EntityQuery {
|
||||
eq.ctx.Limit = &limit
|
||||
return eq
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (eq *EntityQuery) Offset(offset int) *EntityQuery {
|
||||
eq.ctx.Offset = &offset
|
||||
return eq
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (eq *EntityQuery) Unique(unique bool) *EntityQuery {
|
||||
eq.ctx.Unique = &unique
|
||||
return eq
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (eq *EntityQuery) Order(o ...entity.OrderOption) *EntityQuery {
|
||||
eq.order = append(eq.order, o...)
|
||||
return eq
|
||||
}
|
||||
|
||||
// QueryFile chains the current query on the "file" edge.
|
||||
func (eq *EntityQuery) QueryFile() *FileQuery {
|
||||
query := (&FileClient{config: eq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := eq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := eq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(entity.Table, entity.FieldID, selector),
|
||||
sqlgraph.To(file.Table, file.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2M, true, entity.FileTable, entity.FilePrimaryKey...),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(eq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryUser chains the current query on the "user" edge.
|
||||
func (eq *EntityQuery) QueryUser() *UserQuery {
|
||||
query := (&UserClient{config: eq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := eq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := eq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(entity.Table, entity.FieldID, selector),
|
||||
sqlgraph.To(user.Table, user.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, entity.UserTable, entity.UserColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(eq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryStoragePolicy chains the current query on the "storage_policy" edge.
|
||||
func (eq *EntityQuery) QueryStoragePolicy() *StoragePolicyQuery {
|
||||
query := (&StoragePolicyClient{config: eq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := eq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := eq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(entity.Table, entity.FieldID, selector),
|
||||
sqlgraph.To(storagepolicy.Table, storagepolicy.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, entity.StoragePolicyTable, entity.StoragePolicyColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(eq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first Entity entity from the query.
|
||||
// Returns a *NotFoundError when no Entity was found.
|
||||
func (eq *EntityQuery) First(ctx context.Context) (*Entity, error) {
|
||||
nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{entity.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (eq *EntityQuery) FirstX(ctx context.Context) *Entity {
|
||||
node, err := eq.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first Entity ID from the query.
|
||||
// Returns a *NotFoundError when no Entity ID was found.
|
||||
func (eq *EntityQuery) FirstID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{entity.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (eq *EntityQuery) FirstIDX(ctx context.Context) int {
|
||||
id, err := eq.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single Entity entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one Entity entity is found.
|
||||
// Returns a *NotFoundError when no Entity entities are found.
|
||||
func (eq *EntityQuery) Only(ctx context.Context) (*Entity, error) {
|
||||
nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{entity.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{entity.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (eq *EntityQuery) OnlyX(ctx context.Context) *Entity {
|
||||
node, err := eq.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only Entity ID in the query.
|
||||
// Returns a *NotSingularError when more than one Entity ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (eq *EntityQuery) OnlyID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{entity.Label}
|
||||
default:
|
||||
err = &NotSingularError{entity.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (eq *EntityQuery) OnlyIDX(ctx context.Context) int {
|
||||
id, err := eq.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of Entities.
|
||||
func (eq *EntityQuery) All(ctx context.Context) ([]*Entity, error) {
|
||||
ctx = setContextOp(ctx, eq.ctx, "All")
|
||||
if err := eq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*Entity, *EntityQuery]()
|
||||
return withInterceptors[[]*Entity](ctx, eq, qr, eq.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (eq *EntityQuery) AllX(ctx context.Context) []*Entity {
|
||||
nodes, err := eq.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of Entity IDs.
|
||||
func (eq *EntityQuery) IDs(ctx context.Context) (ids []int, err error) {
|
||||
if eq.ctx.Unique == nil && eq.path != nil {
|
||||
eq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, eq.ctx, "IDs")
|
||||
if err = eq.Select(entity.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (eq *EntityQuery) IDsX(ctx context.Context) []int {
|
||||
ids, err := eq.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (eq *EntityQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, eq.ctx, "Count")
|
||||
if err := eq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, eq, querierCount[*EntityQuery](), eq.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (eq *EntityQuery) CountX(ctx context.Context) int {
|
||||
count, err := eq.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (eq *EntityQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, eq.ctx, "Exist")
|
||||
switch _, err := eq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (eq *EntityQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := eq.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the EntityQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (eq *EntityQuery) Clone() *EntityQuery {
|
||||
if eq == nil {
|
||||
return nil
|
||||
}
|
||||
return &EntityQuery{
|
||||
config: eq.config,
|
||||
ctx: eq.ctx.Clone(),
|
||||
order: append([]entity.OrderOption{}, eq.order...),
|
||||
inters: append([]Interceptor{}, eq.inters...),
|
||||
predicates: append([]predicate.Entity{}, eq.predicates...),
|
||||
withFile: eq.withFile.Clone(),
|
||||
withUser: eq.withUser.Clone(),
|
||||
withStoragePolicy: eq.withStoragePolicy.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: eq.sql.Clone(),
|
||||
path: eq.path,
|
||||
}
|
||||
}
|
||||
|
||||
// WithFile tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "file" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (eq *EntityQuery) WithFile(opts ...func(*FileQuery)) *EntityQuery {
|
||||
query := (&FileClient{config: eq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
eq.withFile = query
|
||||
return eq
|
||||
}
|
||||
|
||||
// WithUser tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (eq *EntityQuery) WithUser(opts ...func(*UserQuery)) *EntityQuery {
|
||||
query := (&UserClient{config: eq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
eq.withUser = query
|
||||
return eq
|
||||
}
|
||||
|
||||
// WithStoragePolicy tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "storage_policy" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (eq *EntityQuery) WithStoragePolicy(opts ...func(*StoragePolicyQuery)) *EntityQuery {
|
||||
query := (&StoragePolicyClient{config: eq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
eq.withStoragePolicy = query
|
||||
return eq
|
||||
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count     int       `json:"count,omitempty"`
//	}
//
//	client.Entity.Query().
//		GroupBy(entity.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (eq *EntityQuery) GroupBy(field string, fields ...string) *EntityGroupBy {
	eq.ctx.Fields = append([]string{field}, fields...)
	grbuild := &EntityGroupBy{build: eq}
	grbuild.flds = &eq.ctx.Fields
	grbuild.label = entity.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection of one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.Entity.Query().
//		Select(entity.FieldCreatedAt).
//		Scan(ctx, &v)
func (eq *EntityQuery) Select(fields ...string) *EntitySelect {
	eq.ctx.Fields = append(eq.ctx.Fields, fields...)
	sbuild := &EntitySelect{EntityQuery: eq}
	sbuild.label = entity.Label
	sbuild.flds, sbuild.scan = &eq.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns an EntitySelect configured with the given aggregations.
func (eq *EntityQuery) Aggregate(fns ...AggregateFunc) *EntitySelect {
	return eq.Select().Aggregate(fns...)
}
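// Editor's note: an illustrative sketch, not part of the generated file, combining the
// eager-loading, grouping and aggregation helpers defined above. "client", "ctx", the
// filter values and the scan destination's column names are assumptions for the example.
//
//	// Load entities together with their owning user and storage policy.
//	entities, err := client.Entity.Query().
//		Where(entity.TypeEQ(0)).
//		WithUser().
//		WithStoragePolicy().
//		All(ctx)
//
//	// Sum the stored size per storage policy.
//	var totals []struct {
//		StoragePolicyEntities int   `json:"storage_policy_entities"`
//		Sum                   int64 `json:"sum"`
//	}
//	err = client.Entity.Query().
//		GroupBy(entity.FieldStoragePolicyEntities).
//		Aggregate(ent.Sum(entity.FieldSize)).
//		Scan(ctx, &totals)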
|
||||
|
||||
func (eq *EntityQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range eq.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
}
|
||||
if trv, ok := inter.(Traverser); ok {
|
||||
if err := trv.Traverse(ctx, eq); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, f := range eq.ctx.Fields {
|
||||
if !entity.ValidColumn(f) {
|
||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
}
|
||||
if eq.path != nil {
|
||||
prev, err := eq.path(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
eq.sql = prev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (eq *EntityQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Entity, error) {
|
||||
var (
|
||||
nodes = []*Entity{}
|
||||
_spec = eq.querySpec()
|
||||
loadedTypes = [3]bool{
|
||||
eq.withFile != nil,
|
||||
eq.withUser != nil,
|
||||
eq.withStoragePolicy != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*Entity).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &Entity{config: eq.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, eq.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := eq.withFile; query != nil {
|
||||
if err := eq.loadFile(ctx, query, nodes,
|
||||
func(n *Entity) { n.Edges.File = []*File{} },
|
||||
func(n *Entity, e *File) { n.Edges.File = append(n.Edges.File, e) }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := eq.withUser; query != nil {
|
||||
if err := eq.loadUser(ctx, query, nodes, nil,
|
||||
func(n *Entity, e *User) { n.Edges.User = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := eq.withStoragePolicy; query != nil {
|
||||
if err := eq.loadStoragePolicy(ctx, query, nodes, nil,
|
||||
func(n *Entity, e *StoragePolicy) { n.Edges.StoragePolicy = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (eq *EntityQuery) loadFile(ctx context.Context, query *FileQuery, nodes []*Entity, init func(*Entity), assign func(*Entity, *File)) error {
|
||||
edgeIDs := make([]driver.Value, len(nodes))
|
||||
byID := make(map[int]*Entity)
|
||||
nids := make(map[int]map[*Entity]struct{})
|
||||
for i, node := range nodes {
|
||||
edgeIDs[i] = node.ID
|
||||
byID[node.ID] = node
|
||||
if init != nil {
|
||||
init(node)
|
||||
}
|
||||
}
|
||||
query.Where(func(s *sql.Selector) {
|
||||
joinT := sql.Table(entity.FileTable)
|
||||
s.Join(joinT).On(s.C(file.FieldID), joinT.C(entity.FilePrimaryKey[0]))
|
||||
s.Where(sql.InValues(joinT.C(entity.FilePrimaryKey[1]), edgeIDs...))
|
||||
columns := s.SelectedColumns()
|
||||
s.Select(joinT.C(entity.FilePrimaryKey[1]))
|
||||
s.AppendSelect(columns...)
|
||||
s.SetDistinct(false)
|
||||
})
|
||||
if err := query.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||
return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) {
|
||||
assign := spec.Assign
|
||||
values := spec.ScanValues
|
||||
spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
values, err := values(columns[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return append([]any{new(sql.NullInt64)}, values...), nil
|
||||
}
|
||||
spec.Assign = func(columns []string, values []any) error {
|
||||
outValue := int(values[0].(*sql.NullInt64).Int64)
|
||||
inValue := int(values[1].(*sql.NullInt64).Int64)
|
||||
if nids[inValue] == nil {
|
||||
nids[inValue] = map[*Entity]struct{}{byID[outValue]: {}}
|
||||
return assign(columns[1:], values[1:])
|
||||
}
|
||||
nids[inValue][byID[outValue]] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
})
|
||||
})
|
||||
neighbors, err := withInterceptors[[]*File](ctx, query, qr, query.inters)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected "file" node returned %v`, n.ID)
|
||||
}
|
||||
for kn := range nodes {
|
||||
assign(kn, n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (eq *EntityQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*Entity, init func(*Entity), assign func(*Entity, *User)) error {
|
||||
ids := make([]int, 0, len(nodes))
|
||||
nodeids := make(map[int][]*Entity)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].CreatedBy
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(user.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "created_by" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (eq *EntityQuery) loadStoragePolicy(ctx context.Context, query *StoragePolicyQuery, nodes []*Entity, init func(*Entity), assign func(*Entity, *StoragePolicy)) error {
|
||||
ids := make([]int, 0, len(nodes))
|
||||
nodeids := make(map[int][]*Entity)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].StoragePolicyEntities
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(storagepolicy.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "storage_policy_entities" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (eq *EntityQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := eq.querySpec()
|
||||
_spec.Node.Columns = eq.ctx.Fields
|
||||
if len(eq.ctx.Fields) > 0 {
|
||||
_spec.Unique = eq.ctx.Unique != nil && *eq.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, eq.driver, _spec)
|
||||
}
|
||||
|
||||
func (eq *EntityQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(entity.Table, entity.Columns, sqlgraph.NewFieldSpec(entity.FieldID, field.TypeInt))
|
||||
_spec.From = eq.sql
|
||||
if unique := eq.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if eq.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := eq.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, entity.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != entity.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
if eq.withUser != nil {
|
||||
_spec.Node.AddColumnOnce(entity.FieldCreatedBy)
|
||||
}
|
||||
if eq.withStoragePolicy != nil {
|
||||
_spec.Node.AddColumnOnce(entity.FieldStoragePolicyEntities)
|
||||
}
|
||||
}
|
||||
if ps := eq.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := eq.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := eq.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := eq.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (eq *EntityQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(eq.driver.Dialect())
|
||||
t1 := builder.Table(entity.Table)
|
||||
columns := eq.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = entity.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if eq.sql != nil {
|
||||
selector = eq.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if eq.ctx.Unique != nil && *eq.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, p := range eq.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range eq.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := eq.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
// with default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := eq.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// EntityGroupBy is the group-by builder for Entity entities.
|
||||
type EntityGroupBy struct {
|
||||
selector
|
||||
build *EntityQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (egb *EntityGroupBy) Aggregate(fns ...AggregateFunc) *EntityGroupBy {
|
||||
egb.fns = append(egb.fns, fns...)
|
||||
return egb
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (egb *EntityGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, egb.build.ctx, "GroupBy")
|
||||
if err := egb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*EntityQuery, *EntityGroupBy](ctx, egb.build, egb, egb.build.inters, v)
|
||||
}
|
||||
|
||||
func (egb *EntityGroupBy) sqlScan(ctx context.Context, root *EntityQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(egb.fns))
|
||||
for _, fn := range egb.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(*egb.flds)+len(egb.fns))
|
||||
for _, f := range *egb.flds {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
columns = append(columns, aggregation...)
|
||||
selector.Select(columns...)
|
||||
}
|
||||
selector.GroupBy(selector.Columns(*egb.flds...)...)
|
||||
if err := selector.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := egb.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// EntitySelect is the builder for selecting fields of Entity entities.
|
||||
type EntitySelect struct {
|
||||
*EntityQuery
|
||||
selector
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (es *EntitySelect) Aggregate(fns ...AggregateFunc) *EntitySelect {
|
||||
es.fns = append(es.fns, fns...)
|
||||
return es
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (es *EntitySelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, es.ctx, "Select")
|
||||
if err := es.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*EntityQuery, *EntitySelect](ctx, es.EntityQuery, es, es.inters, v)
|
||||
}
|
||||
|
||||
func (es *EntitySelect) sqlScan(ctx context.Context, root *EntityQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(es.fns))
|
||||
for _, fn := range es.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
switch n := len(*es.selector.flds); {
|
||||
case n == 0 && len(aggregation) > 0:
|
||||
selector.Select(aggregation...)
|
||||
case n != 0 && len(aggregation) > 0:
|
||||
selector.AppendSelect(aggregation...)
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := es.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
1017
ent/entity_update.go
Normal file
File diff suppressed because it is too large
84
ent/enttest/enttest.go
Normal file
@@ -0,0 +1,84 @@
// Code generated by ent, DO NOT EDIT.

package enttest

import (
	"context"

	"github.com/cloudreve/Cloudreve/v4/ent"
	// required by schema hooks.
	_ "github.com/cloudreve/Cloudreve/v4/ent/runtime"

	"entgo.io/ent/dialect/sql/schema"
	"github.com/cloudreve/Cloudreve/v4/ent/migrate"
)

type (
	// TestingT is the interface that is shared between
	// testing.T and testing.B and used by enttest.
	TestingT interface {
		FailNow()
		Error(...any)
	}

	// Option configures client creation.
	Option func(*options)

	options struct {
		opts        []ent.Option
		migrateOpts []schema.MigrateOption
	}
)

// WithOptions forwards options to client creation.
func WithOptions(opts ...ent.Option) Option {
	return func(o *options) {
		o.opts = append(o.opts, opts...)
	}
}

// WithMigrateOptions forwards options to auto migration.
func WithMigrateOptions(opts ...schema.MigrateOption) Option {
	return func(o *options) {
		o.migrateOpts = append(o.migrateOpts, opts...)
	}
}

func newOptions(opts []Option) *options {
	o := &options{}
	for _, opt := range opts {
		opt(o)
	}
	return o
}

// Open calls ent.Open and auto-runs the schema migration.
func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client {
	o := newOptions(opts)
	c, err := ent.Open(driverName, dataSourceName, o.opts...)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	migrateSchema(t, c, o)
	return c
}

// NewClient calls ent.NewClient and auto-runs the schema migration.
func NewClient(t TestingT, opts ...Option) *ent.Client {
	o := newOptions(opts)
	c := ent.NewClient(o.opts...)
	migrateSchema(t, c, o)
	return c
}

func migrateSchema(t TestingT, c *ent.Client, o *options) {
	tables, err := schema.CopyTables(migrate.Tables)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil {
		t.Error(err)
		t.FailNow()
	}
}
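// Editor's note: an illustrative sketch, not part of the generated file, of using the
// enttest helpers above inside a unit test. The in-memory SQLite DSN and the anonymous
// sqlite3 driver import are assumptions; any driver accepted by ent.Open would work.
//
//	package mypkg_test
//
//	import (
//		"context"
//		"testing"
//
//		"github.com/cloudreve/Cloudreve/v4/ent/enttest"
//		_ "github.com/mattn/go-sqlite3"
//	)
//
//	func TestEntityQuery(t *testing.T) {
//		// Open creates the client and runs the schema migration before returning.
//		client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
//		defer client.Close()
//
//		ctx := context.Background()
//		if n := client.Entity.Query().CountX(ctx); n != 0 {
//			t.Fatalf("expected empty table, got %d rows", n)
//		}
//	}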
424
ent/file.go
Normal file
@@ -0,0 +1,424 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/file"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/storagepolicy"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/user"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
)
|
||||
|
||||
// File is the model entity for the File schema.
|
||||
type File struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int `json:"id,omitempty"`
|
||||
// CreatedAt holds the value of the "created_at" field.
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// UpdatedAt holds the value of the "updated_at" field.
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
// Type holds the value of the "type" field.
|
||||
Type int `json:"type,omitempty"`
|
||||
// Name holds the value of the "name" field.
|
||||
Name string `json:"name,omitempty"`
|
||||
// OwnerID holds the value of the "owner_id" field.
|
||||
OwnerID int `json:"owner_id,omitempty"`
|
||||
// Size holds the value of the "size" field.
|
||||
Size int64 `json:"size,omitempty"`
|
||||
// PrimaryEntity holds the value of the "primary_entity" field.
|
||||
PrimaryEntity int `json:"primary_entity,omitempty"`
|
||||
// FileChildren holds the value of the "file_children" field.
|
||||
FileChildren int `json:"file_children,omitempty"`
|
||||
// IsSymbolic holds the value of the "is_symbolic" field.
|
||||
IsSymbolic bool `json:"is_symbolic,omitempty"`
|
||||
// Props holds the value of the "props" field.
|
||||
Props *types.FileProps `json:"props,omitempty"`
|
||||
// StoragePolicyFiles holds the value of the "storage_policy_files" field.
|
||||
StoragePolicyFiles int `json:"storage_policy_files,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the FileQuery when eager-loading is set.
|
||||
Edges FileEdges `json:"edges"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// FileEdges holds the relations/edges for other nodes in the graph.
|
||||
type FileEdges struct {
|
||||
// Owner holds the value of the owner edge.
|
||||
Owner *User `json:"owner,omitempty"`
|
||||
// StoragePolicies holds the value of the storage_policies edge.
|
||||
StoragePolicies *StoragePolicy `json:"storage_policies,omitempty"`
|
||||
// Parent holds the value of the parent edge.
|
||||
Parent *File `json:"parent,omitempty"`
|
||||
// Children holds the value of the children edge.
|
||||
Children []*File `json:"children,omitempty"`
|
||||
// Metadata holds the value of the metadata edge.
|
||||
Metadata []*Metadata `json:"metadata,omitempty"`
|
||||
// Entities holds the value of the entities edge.
|
||||
Entities []*Entity `json:"entities,omitempty"`
|
||||
// Shares holds the value of the shares edge.
|
||||
Shares []*Share `json:"shares,omitempty"`
|
||||
// DirectLinks holds the value of the direct_links edge.
|
||||
DirectLinks []*DirectLink `json:"direct_links,omitempty"`
|
||||
// loadedTypes holds the information for reporting if a
|
||||
// type was loaded (or requested) in eager-loading or not.
|
||||
loadedTypes [8]bool
|
||||
}
|
||||
|
||||
// OwnerOrErr returns the Owner value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e FileEdges) OwnerOrErr() (*User, error) {
|
||||
if e.loadedTypes[0] {
|
||||
if e.Owner == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: user.Label}
|
||||
}
|
||||
return e.Owner, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "owner"}
|
||||
}
|
||||
|
||||
// StoragePoliciesOrErr returns the StoragePolicies value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e FileEdges) StoragePoliciesOrErr() (*StoragePolicy, error) {
|
||||
if e.loadedTypes[1] {
|
||||
if e.StoragePolicies == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: storagepolicy.Label}
|
||||
}
|
||||
return e.StoragePolicies, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "storage_policies"}
|
||||
}
|
||||
|
||||
// ParentOrErr returns the Parent value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e FileEdges) ParentOrErr() (*File, error) {
|
||||
if e.loadedTypes[2] {
|
||||
if e.Parent == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: file.Label}
|
||||
}
|
||||
return e.Parent, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "parent"}
|
||||
}
|
||||
|
||||
// ChildrenOrErr returns the Children value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e FileEdges) ChildrenOrErr() ([]*File, error) {
|
||||
if e.loadedTypes[3] {
|
||||
return e.Children, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "children"}
|
||||
}
|
||||
|
||||
// MetadataOrErr returns the Metadata value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e FileEdges) MetadataOrErr() ([]*Metadata, error) {
|
||||
if e.loadedTypes[4] {
|
||||
return e.Metadata, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "metadata"}
|
||||
}
|
||||
|
||||
// EntitiesOrErr returns the Entities value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e FileEdges) EntitiesOrErr() ([]*Entity, error) {
|
||||
if e.loadedTypes[5] {
|
||||
return e.Entities, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "entities"}
|
||||
}
|
||||
|
||||
// SharesOrErr returns the Shares value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e FileEdges) SharesOrErr() ([]*Share, error) {
|
||||
if e.loadedTypes[6] {
|
||||
return e.Shares, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "shares"}
|
||||
}
|
||||
|
||||
// DirectLinksOrErr returns the DirectLinks value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e FileEdges) DirectLinksOrErr() ([]*DirectLink, error) {
|
||||
if e.loadedTypes[7] {
|
||||
return e.DirectLinks, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "direct_links"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
func (*File) scanValues(columns []string) ([]any, error) {
    values := make([]any, len(columns))
    for i := range columns {
        switch columns[i] {
        case file.FieldProps:
            values[i] = new([]byte)
        case file.FieldIsSymbolic:
            values[i] = new(sql.NullBool)
        case file.FieldID, file.FieldType, file.FieldOwnerID, file.FieldSize, file.FieldPrimaryEntity, file.FieldFileChildren, file.FieldStoragePolicyFiles:
            values[i] = new(sql.NullInt64)
        case file.FieldName:
            values[i] = new(sql.NullString)
        case file.FieldCreatedAt, file.FieldUpdatedAt:
            values[i] = new(sql.NullTime)
        default:
            values[i] = new(sql.UnknownType)
        }
    }
    return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the File fields.
func (f *File) assignValues(columns []string, values []any) error {
    if m, n := len(values), len(columns); m < n {
        return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
    }
    for i := range columns {
        switch columns[i] {
        case file.FieldID:
            value, ok := values[i].(*sql.NullInt64)
            if !ok {
                return fmt.Errorf("unexpected type %T for field id", value)
            }
            f.ID = int(value.Int64)
        case file.FieldCreatedAt:
            if value, ok := values[i].(*sql.NullTime); !ok {
                return fmt.Errorf("unexpected type %T for field created_at", values[i])
            } else if value.Valid {
                f.CreatedAt = value.Time
            }
        case file.FieldUpdatedAt:
            if value, ok := values[i].(*sql.NullTime); !ok {
                return fmt.Errorf("unexpected type %T for field updated_at", values[i])
            } else if value.Valid {
                f.UpdatedAt = value.Time
            }
        case file.FieldType:
            if value, ok := values[i].(*sql.NullInt64); !ok {
                return fmt.Errorf("unexpected type %T for field type", values[i])
            } else if value.Valid {
                f.Type = int(value.Int64)
            }
        case file.FieldName:
            if value, ok := values[i].(*sql.NullString); !ok {
                return fmt.Errorf("unexpected type %T for field name", values[i])
            } else if value.Valid {
                f.Name = value.String
            }
        case file.FieldOwnerID:
            if value, ok := values[i].(*sql.NullInt64); !ok {
                return fmt.Errorf("unexpected type %T for field owner_id", values[i])
            } else if value.Valid {
                f.OwnerID = int(value.Int64)
            }
        case file.FieldSize:
            if value, ok := values[i].(*sql.NullInt64); !ok {
                return fmt.Errorf("unexpected type %T for field size", values[i])
            } else if value.Valid {
                f.Size = value.Int64
            }
        case file.FieldPrimaryEntity:
            if value, ok := values[i].(*sql.NullInt64); !ok {
                return fmt.Errorf("unexpected type %T for field primary_entity", values[i])
            } else if value.Valid {
                f.PrimaryEntity = int(value.Int64)
            }
        case file.FieldFileChildren:
            if value, ok := values[i].(*sql.NullInt64); !ok {
                return fmt.Errorf("unexpected type %T for field file_children", values[i])
            } else if value.Valid {
                f.FileChildren = int(value.Int64)
            }
        case file.FieldIsSymbolic:
            if value, ok := values[i].(*sql.NullBool); !ok {
                return fmt.Errorf("unexpected type %T for field is_symbolic", values[i])
            } else if value.Valid {
                f.IsSymbolic = value.Bool
            }
        case file.FieldProps:
            if value, ok := values[i].(*[]byte); !ok {
                return fmt.Errorf("unexpected type %T for field props", values[i])
            } else if value != nil && len(*value) > 0 {
                if err := json.Unmarshal(*value, &f.Props); err != nil {
                    return fmt.Errorf("unmarshal field props: %w", err)
                }
            }
        case file.FieldStoragePolicyFiles:
            if value, ok := values[i].(*sql.NullInt64); !ok {
                return fmt.Errorf("unexpected type %T for field storage_policy_files", values[i])
            } else if value.Valid {
                f.StoragePolicyFiles = int(value.Int64)
            }
        default:
            f.selectValues.Set(columns[i], values[i])
        }
    }
    return nil
}

// Value returns the ent.Value that was dynamically selected and assigned to the File.
// This includes values selected through modifiers, order, etc.
func (f *File) Value(name string) (ent.Value, error) {
    return f.selectValues.Get(name)
}

// QueryOwner queries the "owner" edge of the File entity.
func (f *File) QueryOwner() *UserQuery {
    return NewFileClient(f.config).QueryOwner(f)
}

// QueryStoragePolicies queries the "storage_policies" edge of the File entity.
func (f *File) QueryStoragePolicies() *StoragePolicyQuery {
    return NewFileClient(f.config).QueryStoragePolicies(f)
}

// QueryParent queries the "parent" edge of the File entity.
func (f *File) QueryParent() *FileQuery {
    return NewFileClient(f.config).QueryParent(f)
}

// QueryChildren queries the "children" edge of the File entity.
func (f *File) QueryChildren() *FileQuery {
    return NewFileClient(f.config).QueryChildren(f)
}

// QueryMetadata queries the "metadata" edge of the File entity.
func (f *File) QueryMetadata() *MetadataQuery {
    return NewFileClient(f.config).QueryMetadata(f)
}

// QueryEntities queries the "entities" edge of the File entity.
func (f *File) QueryEntities() *EntityQuery {
    return NewFileClient(f.config).QueryEntities(f)
}

// QueryShares queries the "shares" edge of the File entity.
func (f *File) QueryShares() *ShareQuery {
    return NewFileClient(f.config).QueryShares(f)
}

// QueryDirectLinks queries the "direct_links" edge of the File entity.
func (f *File) QueryDirectLinks() *DirectLinkQuery {
    return NewFileClient(f.config).QueryDirectLinks(f)
}

// Update returns a builder for updating this File.
// Note that you need to call File.Unwrap() before calling this method if this File
// was returned from a transaction, and the transaction was committed or rolled back.
func (f *File) Update() *FileUpdateOne {
    return NewFileClient(f.config).UpdateOne(f)
}

// Unwrap unwraps the File entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (f *File) Unwrap() *File {
    _tx, ok := f.config.driver.(*txDriver)
    if !ok {
        panic("ent: File is not a transactional entity")
    }
    f.config.driver = _tx.drv
    return f
}
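// Usage sketch (not part of the generated code): an entity read inside a transaction
// keeps the transactional driver, so Unwrap must be called once the transaction is
// finished before further builders are used. "client" and "ctx" are assumed here for
// illustration.
//
//    tx, err := client.Tx(ctx)
//    if err != nil {
//        return err
//    }
//    f, err := tx.File.Query().First(ctx)
//    if err != nil {
//        _ = tx.Rollback()
//        return err
//    }
//    if err := tx.Commit(); err != nil {
//        return err
//    }
//    f = f.Unwrap()                                   // detach from the committed tx
//    _, err = f.Update().SetName("renamed").Save(ctx) // now runs on the root driver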
// String implements the fmt.Stringer.
func (f *File) String() string {
    var builder strings.Builder
    builder.WriteString("File(")
    builder.WriteString(fmt.Sprintf("id=%v, ", f.ID))
    builder.WriteString("created_at=")
    builder.WriteString(f.CreatedAt.Format(time.ANSIC))
    builder.WriteString(", ")
    builder.WriteString("updated_at=")
    builder.WriteString(f.UpdatedAt.Format(time.ANSIC))
    builder.WriteString(", ")
    builder.WriteString("type=")
    builder.WriteString(fmt.Sprintf("%v", f.Type))
    builder.WriteString(", ")
    builder.WriteString("name=")
    builder.WriteString(f.Name)
    builder.WriteString(", ")
    builder.WriteString("owner_id=")
    builder.WriteString(fmt.Sprintf("%v", f.OwnerID))
    builder.WriteString(", ")
    builder.WriteString("size=")
    builder.WriteString(fmt.Sprintf("%v", f.Size))
    builder.WriteString(", ")
    builder.WriteString("primary_entity=")
    builder.WriteString(fmt.Sprintf("%v", f.PrimaryEntity))
    builder.WriteString(", ")
    builder.WriteString("file_children=")
    builder.WriteString(fmt.Sprintf("%v", f.FileChildren))
    builder.WriteString(", ")
    builder.WriteString("is_symbolic=")
    builder.WriteString(fmt.Sprintf("%v", f.IsSymbolic))
    builder.WriteString(", ")
    builder.WriteString("props=")
    builder.WriteString(fmt.Sprintf("%v", f.Props))
    builder.WriteString(", ")
    builder.WriteString("storage_policy_files=")
    builder.WriteString(fmt.Sprintf("%v", f.StoragePolicyFiles))
    builder.WriteByte(')')
    return builder.String()
}

// SetOwner manually set the edge as loaded state.
func (e *File) SetOwner(v *User) {
    e.Edges.Owner = v
    e.Edges.loadedTypes[0] = true
}

// SetStoragePolicies manually set the edge as loaded state.
func (e *File) SetStoragePolicies(v *StoragePolicy) {
    e.Edges.StoragePolicies = v
    e.Edges.loadedTypes[1] = true
}

// SetParent manually set the edge as loaded state.
func (e *File) SetParent(v *File) {
    e.Edges.Parent = v
    e.Edges.loadedTypes[2] = true
}

// SetChildren manually set the edge as loaded state.
func (e *File) SetChildren(v []*File) {
    e.Edges.Children = v
    e.Edges.loadedTypes[3] = true
}

// SetMetadata manually set the edge as loaded state.
func (e *File) SetMetadata(v []*Metadata) {
    e.Edges.Metadata = v
    e.Edges.loadedTypes[4] = true
}

// SetEntities manually set the edge as loaded state.
func (e *File) SetEntities(v []*Entity) {
    e.Edges.Entities = v
    e.Edges.loadedTypes[5] = true
}

// SetShares manually set the edge as loaded state.
func (e *File) SetShares(v []*Share) {
    e.Edges.Shares = v
    e.Edges.loadedTypes[6] = true
}

// SetDirectLinks manually set the edge as loaded state.
func (e *File) SetDirectLinks(v []*DirectLink) {
    e.Edges.DirectLinks = v
    e.Edges.loadedTypes[7] = true
}

// Files is a parsable slice of File.
type Files []*File
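// Usage sketch (not part of the generated code): the Set* helpers above mark an edge
// as already loaded, so related entities fetched elsewhere can be attached without
// another query. "f" (*File) and "owner" (*User) are assumed here for illustration.
//
//    f.SetOwner(owner)
//    u := f.Edges.Owner // available without an extra query; loadedTypes[0] is set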
ent/file/file.go (new file, 360 lines)
@@ -0,0 +1,360 @@
// Code generated by ent, DO NOT EDIT.

package file

import (
    "time"

    "entgo.io/ent"
    "entgo.io/ent/dialect/sql"
    "entgo.io/ent/dialect/sql/sqlgraph"
)

const (
    // Label holds the string label denoting the file type in the database.
    Label = "file"
    // FieldID holds the string denoting the id field in the database.
    FieldID = "id"
    // FieldCreatedAt holds the string denoting the created_at field in the database.
    FieldCreatedAt = "created_at"
    // FieldUpdatedAt holds the string denoting the updated_at field in the database.
    FieldUpdatedAt = "updated_at"
    // FieldType holds the string denoting the type field in the database.
    FieldType = "type"
    // FieldName holds the string denoting the name field in the database.
    FieldName = "name"
    // FieldOwnerID holds the string denoting the owner_id field in the database.
    FieldOwnerID = "owner_id"
    // FieldSize holds the string denoting the size field in the database.
    FieldSize = "size"
    // FieldPrimaryEntity holds the string denoting the primary_entity field in the database.
    FieldPrimaryEntity = "primary_entity"
    // FieldFileChildren holds the string denoting the file_children field in the database.
    FieldFileChildren = "file_children"
    // FieldIsSymbolic holds the string denoting the is_symbolic field in the database.
    FieldIsSymbolic = "is_symbolic"
    // FieldProps holds the string denoting the props field in the database.
    FieldProps = "props"
    // FieldStoragePolicyFiles holds the string denoting the storage_policy_files field in the database.
    FieldStoragePolicyFiles = "storage_policy_files"
    // EdgeOwner holds the string denoting the owner edge name in mutations.
    EdgeOwner = "owner"
    // EdgeStoragePolicies holds the string denoting the storage_policies edge name in mutations.
    EdgeStoragePolicies = "storage_policies"
    // EdgeParent holds the string denoting the parent edge name in mutations.
    EdgeParent = "parent"
    // EdgeChildren holds the string denoting the children edge name in mutations.
    EdgeChildren = "children"
    // EdgeMetadata holds the string denoting the metadata edge name in mutations.
    EdgeMetadata = "metadata"
    // EdgeEntities holds the string denoting the entities edge name in mutations.
    EdgeEntities = "entities"
    // EdgeShares holds the string denoting the shares edge name in mutations.
    EdgeShares = "shares"
    // EdgeDirectLinks holds the string denoting the direct_links edge name in mutations.
    EdgeDirectLinks = "direct_links"
    // Table holds the table name of the file in the database.
    Table = "files"
    // OwnerTable is the table that holds the owner relation/edge.
    OwnerTable = "files"
    // OwnerInverseTable is the table name for the User entity.
    // It exists in this package in order to avoid circular dependency with the "user" package.
    OwnerInverseTable = "users"
    // OwnerColumn is the table column denoting the owner relation/edge.
    OwnerColumn = "owner_id"
    // StoragePoliciesTable is the table that holds the storage_policies relation/edge.
    StoragePoliciesTable = "files"
    // StoragePoliciesInverseTable is the table name for the StoragePolicy entity.
    // It exists in this package in order to avoid circular dependency with the "storagepolicy" package.
    StoragePoliciesInverseTable = "storage_policies"
    // StoragePoliciesColumn is the table column denoting the storage_policies relation/edge.
    StoragePoliciesColumn = "storage_policy_files"
    // ParentTable is the table that holds the parent relation/edge.
    ParentTable = "files"
    // ParentColumn is the table column denoting the parent relation/edge.
    ParentColumn = "file_children"
    // ChildrenTable is the table that holds the children relation/edge.
    ChildrenTable = "files"
    // ChildrenColumn is the table column denoting the children relation/edge.
    ChildrenColumn = "file_children"
    // MetadataTable is the table that holds the metadata relation/edge.
    MetadataTable = "metadata"
    // MetadataInverseTable is the table name for the Metadata entity.
    // It exists in this package in order to avoid circular dependency with the "metadata" package.
    MetadataInverseTable = "metadata"
    // MetadataColumn is the table column denoting the metadata relation/edge.
    MetadataColumn = "file_id"
    // EntitiesTable is the table that holds the entities relation/edge. The primary key declared below.
    EntitiesTable = "file_entities"
    // EntitiesInverseTable is the table name for the Entity entity.
    // It exists in this package in order to avoid circular dependency with the "entity" package.
    EntitiesInverseTable = "entities"
    // SharesTable is the table that holds the shares relation/edge.
    SharesTable = "shares"
    // SharesInverseTable is the table name for the Share entity.
    // It exists in this package in order to avoid circular dependency with the "share" package.
    SharesInverseTable = "shares"
    // SharesColumn is the table column denoting the shares relation/edge.
    SharesColumn = "file_shares"
    // DirectLinksTable is the table that holds the direct_links relation/edge.
    DirectLinksTable = "direct_links"
    // DirectLinksInverseTable is the table name for the DirectLink entity.
    // It exists in this package in order to avoid circular dependency with the "directlink" package.
    DirectLinksInverseTable = "direct_links"
    // DirectLinksColumn is the table column denoting the direct_links relation/edge.
    DirectLinksColumn = "file_id"
)

// Columns holds all SQL columns for file fields.
var Columns = []string{
    FieldID,
    FieldCreatedAt,
    FieldUpdatedAt,
    FieldType,
    FieldName,
    FieldOwnerID,
    FieldSize,
    FieldPrimaryEntity,
    FieldFileChildren,
    FieldIsSymbolic,
    FieldProps,
    FieldStoragePolicyFiles,
}

var (
    // EntitiesPrimaryKey and EntitiesColumn2 are the table columns denoting the
    // primary key for the entities relation (M2M).
    EntitiesPrimaryKey = []string{"file_id", "entity_id"}
)

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
    for i := range Columns {
        if column == Columns[i] {
            return true
        }
    }
    return false
}

// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//    import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
    Hooks [1]ent.Hook
    // DefaultCreatedAt holds the default value on creation for the "created_at" field.
    DefaultCreatedAt func() time.Time
    // DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
    DefaultUpdatedAt func() time.Time
    // DefaultSize holds the default value on creation for the "size" field.
    DefaultSize int64
    // DefaultIsSymbolic holds the default value on creation for the "is_symbolic" field.
    DefaultIsSymbolic bool
)
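// Usage sketch (not part of the generated code): the hook and default-value variables
// above stay unset until the runtime package wires them up, so the blank import
// belongs in the application entry point.
//
//    package main
//
//    import (
//        _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
//    )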
// OrderOption defines the ordering options for the File queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldType, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByOwnerID orders the results by the owner_id field.
func ByOwnerID(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldOwnerID, opts...).ToFunc()
}

// BySize orders the results by the size field.
func BySize(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldSize, opts...).ToFunc()
}

// ByPrimaryEntity orders the results by the primary_entity field.
func ByPrimaryEntity(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldPrimaryEntity, opts...).ToFunc()
}

// ByFileChildren orders the results by the file_children field.
func ByFileChildren(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldFileChildren, opts...).ToFunc()
}

// ByIsSymbolic orders the results by the is_symbolic field.
func ByIsSymbolic(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldIsSymbolic, opts...).ToFunc()
}

// ByStoragePolicyFiles orders the results by the storage_policy_files field.
func ByStoragePolicyFiles(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldStoragePolicyFiles, opts...).ToFunc()
}

// ByOwnerField orders the results by owner field.
func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...))
    }
}

// ByStoragePoliciesField orders the results by storage_policies field.
func ByStoragePoliciesField(field string, opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborTerms(s, newStoragePoliciesStep(), sql.OrderByField(field, opts...))
    }
}

// ByParentField orders the results by parent field.
func ByParentField(field string, opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborTerms(s, newParentStep(), sql.OrderByField(field, opts...))
    }
}

// ByChildrenCount orders the results by children count.
func ByChildrenCount(opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborsCount(s, newChildrenStep(), opts...)
    }
}

// ByChildren orders the results by children terms.
func ByChildren(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborTerms(s, newChildrenStep(), append([]sql.OrderTerm{term}, terms...)...)
    }
}

// ByMetadataCount orders the results by metadata count.
func ByMetadataCount(opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborsCount(s, newMetadataStep(), opts...)
    }
}

// ByMetadata orders the results by metadata terms.
func ByMetadata(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborTerms(s, newMetadataStep(), append([]sql.OrderTerm{term}, terms...)...)
    }
}

// ByEntitiesCount orders the results by entities count.
func ByEntitiesCount(opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborsCount(s, newEntitiesStep(), opts...)
    }
}

// ByEntities orders the results by entities terms.
func ByEntities(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborTerms(s, newEntitiesStep(), append([]sql.OrderTerm{term}, terms...)...)
    }
}

// BySharesCount orders the results by shares count.
func BySharesCount(opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborsCount(s, newSharesStep(), opts...)
    }
}

// ByShares orders the results by shares terms.
func ByShares(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborTerms(s, newSharesStep(), append([]sql.OrderTerm{term}, terms...)...)
    }
}

// ByDirectLinksCount orders the results by direct_links count.
func ByDirectLinksCount(opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborsCount(s, newDirectLinksStep(), opts...)
    }
}

// ByDirectLinks orders the results by direct_links terms.
func ByDirectLinks(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborTerms(s, newDirectLinksStep(), append([]sql.OrderTerm{term}, terms...)...)
    }
}
func newOwnerStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
        sqlgraph.To(OwnerInverseTable, FieldID),
        sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
    )
}
func newStoragePoliciesStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
        sqlgraph.To(StoragePoliciesInverseTable, FieldID),
        sqlgraph.Edge(sqlgraph.M2O, true, StoragePoliciesTable, StoragePoliciesColumn),
    )
}
func newParentStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
        sqlgraph.To(Table, FieldID),
        sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
    )
}
func newChildrenStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
        sqlgraph.To(Table, FieldID),
        sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
    )
}
func newMetadataStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
        sqlgraph.To(MetadataInverseTable, FieldID),
        sqlgraph.Edge(sqlgraph.O2M, false, MetadataTable, MetadataColumn),
    )
}
func newEntitiesStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
        sqlgraph.To(EntitiesInverseTable, FieldID),
        sqlgraph.Edge(sqlgraph.M2M, false, EntitiesTable, EntitiesPrimaryKey...),
    )
}
func newSharesStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
        sqlgraph.To(SharesInverseTable, FieldID),
        sqlgraph.Edge(sqlgraph.O2M, false, SharesTable, SharesColumn),
    )
}
func newDirectLinksStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
        sqlgraph.To(DirectLinksInverseTable, FieldID),
        sqlgraph.Edge(sqlgraph.O2M, false, DirectLinksTable, DirectLinksColumn),
    )
}
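// Usage sketch (not part of the generated code): OrderOption values plug into a
// query's Order call, and sql.OrderAsc/sql.OrderDesc tune each term. "client" and
// "ctx" are assumed here for illustration.
//
//    folders, err := client.File.Query().
//        Order(file.ByCreatedAt(sql.OrderDesc()), file.ByChildrenCount()).
//        All(ctx)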
ent/file/where.go (new file, 680 lines)
@@ -0,0 +1,680 @@
// Code generated by ent, DO NOT EDIT.

package file

import (
    "time"

    "entgo.io/ent/dialect/sql"
    "entgo.io/ent/dialect/sql/sqlgraph"
    "github.com/cloudreve/Cloudreve/v4/ent/predicate"
)

// ID filters vertices based on their ID field.
func ID(id int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.File {
    return predicate.File(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.File {
    return predicate.File(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.File {
    return predicate.File(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.File {
    return predicate.File(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.File {
    return predicate.File(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.File {
    return predicate.File(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.File {
    return predicate.File(sql.FieldLTE(FieldID, id))
}

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.File {
    return predicate.File(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.File {
    return predicate.File(sql.FieldEQ(FieldUpdatedAt, v))
}

// Type applies equality check predicate on the "type" field. It's identical to TypeEQ.
func Type(v int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldType, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.File {
    return predicate.File(sql.FieldEQ(FieldName, v))
}

// OwnerID applies equality check predicate on the "owner_id" field. It's identical to OwnerIDEQ.
func OwnerID(v int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldOwnerID, v))
}

// Size applies equality check predicate on the "size" field. It's identical to SizeEQ.
func Size(v int64) predicate.File {
    return predicate.File(sql.FieldEQ(FieldSize, v))
}

// PrimaryEntity applies equality check predicate on the "primary_entity" field. It's identical to PrimaryEntityEQ.
func PrimaryEntity(v int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldPrimaryEntity, v))
}

// FileChildren applies equality check predicate on the "file_children" field. It's identical to FileChildrenEQ.
func FileChildren(v int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldFileChildren, v))
}

// IsSymbolic applies equality check predicate on the "is_symbolic" field. It's identical to IsSymbolicEQ.
func IsSymbolic(v bool) predicate.File {
    return predicate.File(sql.FieldEQ(FieldIsSymbolic, v))
}

// StoragePolicyFiles applies equality check predicate on the "storage_policy_files" field. It's identical to StoragePolicyFilesEQ.
func StoragePolicyFiles(v int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldStoragePolicyFiles, v))
}

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.File {
    return predicate.File(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.File {
    return predicate.File(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.File {
    return predicate.File(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.File {
    return predicate.File(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.File {
    return predicate.File(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.File {
    return predicate.File(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.File {
    return predicate.File(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.File {
    return predicate.File(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.File {
    return predicate.File(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.File {
    return predicate.File(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.File {
    return predicate.File(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.File {
    return predicate.File(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.File {
    return predicate.File(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.File {
    return predicate.File(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.File {
    return predicate.File(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.File {
    return predicate.File(sql.FieldLTE(FieldUpdatedAt, v))
}

// TypeEQ applies the EQ predicate on the "type" field.
func TypeEQ(v int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldType, v))
}

// TypeNEQ applies the NEQ predicate on the "type" field.
func TypeNEQ(v int) predicate.File {
    return predicate.File(sql.FieldNEQ(FieldType, v))
}

// TypeIn applies the In predicate on the "type" field.
func TypeIn(vs ...int) predicate.File {
    return predicate.File(sql.FieldIn(FieldType, vs...))
}

// TypeNotIn applies the NotIn predicate on the "type" field.
func TypeNotIn(vs ...int) predicate.File {
    return predicate.File(sql.FieldNotIn(FieldType, vs...))
}

// TypeGT applies the GT predicate on the "type" field.
func TypeGT(v int) predicate.File {
    return predicate.File(sql.FieldGT(FieldType, v))
}

// TypeGTE applies the GTE predicate on the "type" field.
func TypeGTE(v int) predicate.File {
    return predicate.File(sql.FieldGTE(FieldType, v))
}

// TypeLT applies the LT predicate on the "type" field.
func TypeLT(v int) predicate.File {
    return predicate.File(sql.FieldLT(FieldType, v))
}

// TypeLTE applies the LTE predicate on the "type" field.
func TypeLTE(v int) predicate.File {
    return predicate.File(sql.FieldLTE(FieldType, v))
}

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.File {
    return predicate.File(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.File {
    return predicate.File(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.File {
    return predicate.File(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.File {
    return predicate.File(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.File {
    return predicate.File(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.File {
    return predicate.File(sql.FieldGTE(FieldName, v))
}

// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.File {
    return predicate.File(sql.FieldLT(FieldName, v))
}

// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.File {
    return predicate.File(sql.FieldLTE(FieldName, v))
}

// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.File {
    return predicate.File(sql.FieldContains(FieldName, v))
}

// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.File {
    return predicate.File(sql.FieldHasPrefix(FieldName, v))
}

// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.File {
    return predicate.File(sql.FieldHasSuffix(FieldName, v))
}

// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.File {
    return predicate.File(sql.FieldEqualFold(FieldName, v))
}

// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.File {
    return predicate.File(sql.FieldContainsFold(FieldName, v))
}
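// Usage sketch (not part of the generated code): field predicates compose directly
// inside a query's Where call, which ANDs its arguments. "client", "ctx" and
// "ownerID" are assumed here for illustration.
//
//    images, err := client.File.Query().
//        Where(
//            file.OwnerID(ownerID),
//            file.Or(file.NameHasSuffix(".jpg"), file.NameContainsFold("avatar")),
//        ).
//        All(ctx)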
// OwnerIDEQ applies the EQ predicate on the "owner_id" field.
func OwnerIDEQ(v int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldOwnerID, v))
}

// OwnerIDNEQ applies the NEQ predicate on the "owner_id" field.
func OwnerIDNEQ(v int) predicate.File {
    return predicate.File(sql.FieldNEQ(FieldOwnerID, v))
}

// OwnerIDIn applies the In predicate on the "owner_id" field.
func OwnerIDIn(vs ...int) predicate.File {
    return predicate.File(sql.FieldIn(FieldOwnerID, vs...))
}

// OwnerIDNotIn applies the NotIn predicate on the "owner_id" field.
func OwnerIDNotIn(vs ...int) predicate.File {
    return predicate.File(sql.FieldNotIn(FieldOwnerID, vs...))
}

// SizeEQ applies the EQ predicate on the "size" field.
func SizeEQ(v int64) predicate.File {
    return predicate.File(sql.FieldEQ(FieldSize, v))
}

// SizeNEQ applies the NEQ predicate on the "size" field.
func SizeNEQ(v int64) predicate.File {
    return predicate.File(sql.FieldNEQ(FieldSize, v))
}

// SizeIn applies the In predicate on the "size" field.
func SizeIn(vs ...int64) predicate.File {
    return predicate.File(sql.FieldIn(FieldSize, vs...))
}

// SizeNotIn applies the NotIn predicate on the "size" field.
func SizeNotIn(vs ...int64) predicate.File {
    return predicate.File(sql.FieldNotIn(FieldSize, vs...))
}

// SizeGT applies the GT predicate on the "size" field.
func SizeGT(v int64) predicate.File {
    return predicate.File(sql.FieldGT(FieldSize, v))
}

// SizeGTE applies the GTE predicate on the "size" field.
func SizeGTE(v int64) predicate.File {
    return predicate.File(sql.FieldGTE(FieldSize, v))
}

// SizeLT applies the LT predicate on the "size" field.
func SizeLT(v int64) predicate.File {
    return predicate.File(sql.FieldLT(FieldSize, v))
}

// SizeLTE applies the LTE predicate on the "size" field.
func SizeLTE(v int64) predicate.File {
    return predicate.File(sql.FieldLTE(FieldSize, v))
}

// PrimaryEntityEQ applies the EQ predicate on the "primary_entity" field.
func PrimaryEntityEQ(v int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldPrimaryEntity, v))
}

// PrimaryEntityNEQ applies the NEQ predicate on the "primary_entity" field.
func PrimaryEntityNEQ(v int) predicate.File {
    return predicate.File(sql.FieldNEQ(FieldPrimaryEntity, v))
}

// PrimaryEntityIn applies the In predicate on the "primary_entity" field.
func PrimaryEntityIn(vs ...int) predicate.File {
    return predicate.File(sql.FieldIn(FieldPrimaryEntity, vs...))
}

// PrimaryEntityNotIn applies the NotIn predicate on the "primary_entity" field.
func PrimaryEntityNotIn(vs ...int) predicate.File {
    return predicate.File(sql.FieldNotIn(FieldPrimaryEntity, vs...))
}

// PrimaryEntityGT applies the GT predicate on the "primary_entity" field.
func PrimaryEntityGT(v int) predicate.File {
    return predicate.File(sql.FieldGT(FieldPrimaryEntity, v))
}

// PrimaryEntityGTE applies the GTE predicate on the "primary_entity" field.
func PrimaryEntityGTE(v int) predicate.File {
    return predicate.File(sql.FieldGTE(FieldPrimaryEntity, v))
}

// PrimaryEntityLT applies the LT predicate on the "primary_entity" field.
func PrimaryEntityLT(v int) predicate.File {
    return predicate.File(sql.FieldLT(FieldPrimaryEntity, v))
}

// PrimaryEntityLTE applies the LTE predicate on the "primary_entity" field.
func PrimaryEntityLTE(v int) predicate.File {
    return predicate.File(sql.FieldLTE(FieldPrimaryEntity, v))
}

// PrimaryEntityIsNil applies the IsNil predicate on the "primary_entity" field.
func PrimaryEntityIsNil() predicate.File {
    return predicate.File(sql.FieldIsNull(FieldPrimaryEntity))
}

// PrimaryEntityNotNil applies the NotNil predicate on the "primary_entity" field.
func PrimaryEntityNotNil() predicate.File {
    return predicate.File(sql.FieldNotNull(FieldPrimaryEntity))
}

// FileChildrenEQ applies the EQ predicate on the "file_children" field.
func FileChildrenEQ(v int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldFileChildren, v))
}

// FileChildrenNEQ applies the NEQ predicate on the "file_children" field.
func FileChildrenNEQ(v int) predicate.File {
    return predicate.File(sql.FieldNEQ(FieldFileChildren, v))
}

// FileChildrenIn applies the In predicate on the "file_children" field.
func FileChildrenIn(vs ...int) predicate.File {
    return predicate.File(sql.FieldIn(FieldFileChildren, vs...))
}

// FileChildrenNotIn applies the NotIn predicate on the "file_children" field.
func FileChildrenNotIn(vs ...int) predicate.File {
    return predicate.File(sql.FieldNotIn(FieldFileChildren, vs...))
}

// FileChildrenIsNil applies the IsNil predicate on the "file_children" field.
func FileChildrenIsNil() predicate.File {
    return predicate.File(sql.FieldIsNull(FieldFileChildren))
}

// FileChildrenNotNil applies the NotNil predicate on the "file_children" field.
func FileChildrenNotNil() predicate.File {
    return predicate.File(sql.FieldNotNull(FieldFileChildren))
}

// IsSymbolicEQ applies the EQ predicate on the "is_symbolic" field.
func IsSymbolicEQ(v bool) predicate.File {
    return predicate.File(sql.FieldEQ(FieldIsSymbolic, v))
}

// IsSymbolicNEQ applies the NEQ predicate on the "is_symbolic" field.
func IsSymbolicNEQ(v bool) predicate.File {
    return predicate.File(sql.FieldNEQ(FieldIsSymbolic, v))
}

// PropsIsNil applies the IsNil predicate on the "props" field.
func PropsIsNil() predicate.File {
    return predicate.File(sql.FieldIsNull(FieldProps))
}

// PropsNotNil applies the NotNil predicate on the "props" field.
func PropsNotNil() predicate.File {
    return predicate.File(sql.FieldNotNull(FieldProps))
}

// StoragePolicyFilesEQ applies the EQ predicate on the "storage_policy_files" field.
func StoragePolicyFilesEQ(v int) predicate.File {
    return predicate.File(sql.FieldEQ(FieldStoragePolicyFiles, v))
}

// StoragePolicyFilesNEQ applies the NEQ predicate on the "storage_policy_files" field.
func StoragePolicyFilesNEQ(v int) predicate.File {
    return predicate.File(sql.FieldNEQ(FieldStoragePolicyFiles, v))
}

// StoragePolicyFilesIn applies the In predicate on the "storage_policy_files" field.
func StoragePolicyFilesIn(vs ...int) predicate.File {
    return predicate.File(sql.FieldIn(FieldStoragePolicyFiles, vs...))
}

// StoragePolicyFilesNotIn applies the NotIn predicate on the "storage_policy_files" field.
func StoragePolicyFilesNotIn(vs ...int) predicate.File {
    return predicate.File(sql.FieldNotIn(FieldStoragePolicyFiles, vs...))
}

// StoragePolicyFilesIsNil applies the IsNil predicate on the "storage_policy_files" field.
func StoragePolicyFilesIsNil() predicate.File {
    return predicate.File(sql.FieldIsNull(FieldStoragePolicyFiles))
}

// StoragePolicyFilesNotNil applies the NotNil predicate on the "storage_policy_files" field.
func StoragePolicyFilesNotNil() predicate.File {
    return predicate.File(sql.FieldNotNull(FieldStoragePolicyFiles))
}

// HasOwner applies the HasEdge predicate on the "owner" edge.
func HasOwner() predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
        )
        sqlgraph.HasNeighbors(s, step)
    })
}

// HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates).
func HasOwnerWith(preds ...predicate.User) predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := newOwnerStep()
        sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
            for _, p := range preds {
                p(s)
            }
        })
    })
}

// HasStoragePolicies applies the HasEdge predicate on the "storage_policies" edge.
func HasStoragePolicies() predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.Edge(sqlgraph.M2O, true, StoragePoliciesTable, StoragePoliciesColumn),
        )
        sqlgraph.HasNeighbors(s, step)
    })
}

// HasStoragePoliciesWith applies the HasEdge predicate on the "storage_policies" edge with a given conditions (other predicates).
func HasStoragePoliciesWith(preds ...predicate.StoragePolicy) predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := newStoragePoliciesStep()
        sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
            for _, p := range preds {
                p(s)
            }
        })
    })
}

// HasParent applies the HasEdge predicate on the "parent" edge.
func HasParent() predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
        )
        sqlgraph.HasNeighbors(s, step)
    })
}

// HasParentWith applies the HasEdge predicate on the "parent" edge with a given conditions (other predicates).
func HasParentWith(preds ...predicate.File) predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := newParentStep()
        sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
            for _, p := range preds {
                p(s)
            }
        })
    })
}

// HasChildren applies the HasEdge predicate on the "children" edge.
func HasChildren() predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
        )
        sqlgraph.HasNeighbors(s, step)
    })
}

// HasChildrenWith applies the HasEdge predicate on the "children" edge with a given conditions (other predicates).
func HasChildrenWith(preds ...predicate.File) predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := newChildrenStep()
        sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
            for _, p := range preds {
                p(s)
            }
        })
    })
}

// HasMetadata applies the HasEdge predicate on the "metadata" edge.
func HasMetadata() predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.Edge(sqlgraph.O2M, false, MetadataTable, MetadataColumn),
        )
        sqlgraph.HasNeighbors(s, step)
    })
}

// HasMetadataWith applies the HasEdge predicate on the "metadata" edge with a given conditions (other predicates).
func HasMetadataWith(preds ...predicate.Metadata) predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := newMetadataStep()
        sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
            for _, p := range preds {
                p(s)
            }
        })
    })
}

// HasEntities applies the HasEdge predicate on the "entities" edge.
func HasEntities() predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.Edge(sqlgraph.M2M, false, EntitiesTable, EntitiesPrimaryKey...),
        )
        sqlgraph.HasNeighbors(s, step)
    })
}

// HasEntitiesWith applies the HasEdge predicate on the "entities" edge with a given conditions (other predicates).
func HasEntitiesWith(preds ...predicate.Entity) predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := newEntitiesStep()
        sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
            for _, p := range preds {
                p(s)
            }
        })
    })
}

// HasShares applies the HasEdge predicate on the "shares" edge.
func HasShares() predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.Edge(sqlgraph.O2M, false, SharesTable, SharesColumn),
        )
        sqlgraph.HasNeighbors(s, step)
    })
}

// HasSharesWith applies the HasEdge predicate on the "shares" edge with a given conditions (other predicates).
func HasSharesWith(preds ...predicate.Share) predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := newSharesStep()
        sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
            for _, p := range preds {
                p(s)
            }
        })
    })
}

// HasDirectLinks applies the HasEdge predicate on the "direct_links" edge.
func HasDirectLinks() predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.Edge(sqlgraph.O2M, false, DirectLinksTable, DirectLinksColumn),
        )
        sqlgraph.HasNeighbors(s, step)
    })
}

// HasDirectLinksWith applies the HasEdge predicate on the "direct_links" edge with a given conditions (other predicates).
func HasDirectLinksWith(preds ...predicate.DirectLink) predicate.File {
    return predicate.File(func(s *sql.Selector) {
        step := newDirectLinksStep()
        sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
            for _, p := range preds {
                p(s)
            }
        })
    })
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.File) predicate.File {
    return predicate.File(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.File) predicate.File {
    return predicate.File(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.File) predicate.File {
    return predicate.File(sql.NotPredicates(p))
}
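// Usage sketch (not part of the generated code): edge predicates such as HasParent
// and HasOwnerWith combine with the And/Or/Not helpers above. "client", "ctx", the
// user package import and "uid" are assumed here for illustration.
//
//    orphans, err := client.File.Query().
//        Where(file.Not(file.HasParent())).
//        Count(ctx)
//
//    owned, err := client.File.Query().
//        Where(file.HasOwnerWith(user.IDEQ(uid)), file.HasShares()).
//        All(ctx)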
ent/file_create.go (new file, 1431 lines); diff suppressed because it is too large
ent/file_delete.go (new file, 88 lines)
@@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
    "context"

    "entgo.io/ent/dialect/sql"
    "entgo.io/ent/dialect/sql/sqlgraph"
    "entgo.io/ent/schema/field"
    "github.com/cloudreve/Cloudreve/v4/ent/file"
    "github.com/cloudreve/Cloudreve/v4/ent/predicate"
)

// FileDelete is the builder for deleting a File entity.
type FileDelete struct {
    config
    hooks    []Hook
    mutation *FileMutation
}

// Where appends a list predicates to the FileDelete builder.
func (fd *FileDelete) Where(ps ...predicate.File) *FileDelete {
    fd.mutation.Where(ps...)
    return fd
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (fd *FileDelete) Exec(ctx context.Context) (int, error) {
    return withHooks(ctx, fd.sqlExec, fd.mutation, fd.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (fd *FileDelete) ExecX(ctx context.Context) int {
    n, err := fd.Exec(ctx)
    if err != nil {
        panic(err)
    }
    return n
}

func (fd *FileDelete) sqlExec(ctx context.Context) (int, error) {
    _spec := sqlgraph.NewDeleteSpec(file.Table, sqlgraph.NewFieldSpec(file.FieldID, field.TypeInt))
    if ps := fd.mutation.predicates; len(ps) > 0 {
        _spec.Predicate = func(selector *sql.Selector) {
            for i := range ps {
                ps[i](selector)
            }
        }
    }
    affected, err := sqlgraph.DeleteNodes(ctx, fd.driver, _spec)
    if err != nil && sqlgraph.IsConstraintError(err) {
        err = &ConstraintError{msg: err.Error(), wrap: err}
    }
    fd.mutation.done = true
    return affected, err
}

// FileDeleteOne is the builder for deleting a single File entity.
type FileDeleteOne struct {
    fd *FileDelete
}

// Where appends a list predicates to the FileDelete builder.
func (fdo *FileDeleteOne) Where(ps ...predicate.File) *FileDeleteOne {
    fdo.fd.mutation.Where(ps...)
    return fdo
}

// Exec executes the deletion query.
func (fdo *FileDeleteOne) Exec(ctx context.Context) error {
    n, err := fdo.fd.Exec(ctx)
    switch {
    case err != nil:
        return err
    case n == 0:
        return &NotFoundError{file.Label}
    default:
        return nil
    }
}

// ExecX is like Exec, but panics if an error occurs.
func (fdo *FileDeleteOne) ExecX(ctx context.Context) {
    if err := fdo.Exec(ctx); err != nil {
        panic(err)
    }
}
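// Usage sketch (not part of the generated code): FileDelete removes every row matching
// its predicates and reports the affected count; ExecX is the panicking variant.
// "client" and "ctx" are assumed here for illustration.
//
//    n, err := client.File.Delete().
//        Where(file.IsSymbolic(true)).
//        Exec(ctx)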
ent/file_query.go (new file, 1156 lines); diff suppressed because it is too large
ent/file_update.go (new file, 1737 lines); diff suppressed because it is too large
ent/fsevent.go (new file, 204 lines)
@@ -0,0 +1,204 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
    "fmt"
    "strings"
    "time"

    "entgo.io/ent"
    "entgo.io/ent/dialect/sql"
    "github.com/cloudreve/Cloudreve/v4/ent/fsevent"
    "github.com/cloudreve/Cloudreve/v4/ent/user"
    "github.com/gofrs/uuid"
)

// FsEvent is the model entity for the FsEvent schema.
type FsEvent struct {
    config `json:"-"`
    // ID of the ent.
    ID int `json:"id,omitempty"`
    // CreatedAt holds the value of the "created_at" field.
    CreatedAt time.Time `json:"created_at,omitempty"`
    // UpdatedAt holds the value of the "updated_at" field.
    UpdatedAt time.Time `json:"updated_at,omitempty"`
    // DeletedAt holds the value of the "deleted_at" field.
    DeletedAt *time.Time `json:"deleted_at,omitempty"`
    // Event holds the value of the "event" field.
    Event string `json:"event,omitempty"`
    // Subscriber holds the value of the "subscriber" field.
    Subscriber uuid.UUID `json:"subscriber,omitempty"`
    // UserFsevent holds the value of the "user_fsevent" field.
    UserFsevent int `json:"user_fsevent,omitempty"`
    // Edges holds the relations/edges for other nodes in the graph.
    // The values are being populated by the FsEventQuery when eager-loading is set.
    Edges        FsEventEdges `json:"edges"`
    selectValues sql.SelectValues
}

// FsEventEdges holds the relations/edges for other nodes in the graph.
type FsEventEdges struct {
    // User holds the value of the user edge.
    User *User `json:"user,omitempty"`
    // loadedTypes holds the information for reporting if a
    // type was loaded (or requested) in eager-loading or not.
    loadedTypes [1]bool
}

// UserOrErr returns the User value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e FsEventEdges) UserOrErr() (*User, error) {
    if e.loadedTypes[0] {
        if e.User == nil {
            // Edge was loaded but was not found.
            return nil, &NotFoundError{label: user.Label}
        }
        return e.User, nil
    }
    return nil, &NotLoadedError{edge: "user"}
}

// scanValues returns the types for scanning values from sql.Rows.
func (*FsEvent) scanValues(columns []string) ([]any, error) {
    values := make([]any, len(columns))
    for i := range columns {
        switch columns[i] {
        case fsevent.FieldID, fsevent.FieldUserFsevent:
            values[i] = new(sql.NullInt64)
        case fsevent.FieldEvent:
            values[i] = new(sql.NullString)
        case fsevent.FieldCreatedAt, fsevent.FieldUpdatedAt, fsevent.FieldDeletedAt:
            values[i] = new(sql.NullTime)
        case fsevent.FieldSubscriber:
            values[i] = new(uuid.UUID)
        default:
            values[i] = new(sql.UnknownType)
        }
    }
    return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the FsEvent fields.
func (fe *FsEvent) assignValues(columns []string, values []any) error {
    if m, n := len(values), len(columns); m < n {
        return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
    }
    for i := range columns {
        switch columns[i] {
        case fsevent.FieldID:
            value, ok := values[i].(*sql.NullInt64)
            if !ok {
                return fmt.Errorf("unexpected type %T for field id", value)
            }
            fe.ID = int(value.Int64)
        case fsevent.FieldCreatedAt:
            if value, ok := values[i].(*sql.NullTime); !ok {
                return fmt.Errorf("unexpected type %T for field created_at", values[i])
            } else if value.Valid {
                fe.CreatedAt = value.Time
            }
        case fsevent.FieldUpdatedAt:
            if value, ok := values[i].(*sql.NullTime); !ok {
                return fmt.Errorf("unexpected type %T for field updated_at", values[i])
            } else if value.Valid {
                fe.UpdatedAt = value.Time
            }
        case fsevent.FieldDeletedAt:
            if value, ok := values[i].(*sql.NullTime); !ok {
                return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
            } else if value.Valid {
                fe.DeletedAt = new(time.Time)
                *fe.DeletedAt = value.Time
            }
        case fsevent.FieldEvent:
            if value, ok := values[i].(*sql.NullString); !ok {
                return fmt.Errorf("unexpected type %T for field event", values[i])
            } else if value.Valid {
                fe.Event = value.String
            }
        case fsevent.FieldSubscriber:
            if value, ok := values[i].(*uuid.UUID); !ok {
                return fmt.Errorf("unexpected type %T for field subscriber", values[i])
            } else if value != nil {
                fe.Subscriber = *value
            }
        case fsevent.FieldUserFsevent:
            if value, ok := values[i].(*sql.NullInt64); !ok {
                return fmt.Errorf("unexpected type %T for field user_fsevent", values[i])
            } else if value.Valid {
                fe.UserFsevent = int(value.Int64)
            }
        default:
            fe.selectValues.Set(columns[i], values[i])
        }
    }
    return nil
}

// Value returns the ent.Value that was dynamically selected and assigned to the FsEvent.
// This includes values selected through modifiers, order, etc.
func (fe *FsEvent) Value(name string) (ent.Value, error) {
    return fe.selectValues.Get(name)
}

// QueryUser queries the "user" edge of the FsEvent entity.
func (fe *FsEvent) QueryUser() *UserQuery {
    return NewFsEventClient(fe.config).QueryUser(fe)
}

// Update returns a builder for updating this FsEvent.
// Note that you need to call FsEvent.Unwrap() before calling this method if this FsEvent
// was returned from a transaction, and the transaction was committed or rolled back.
func (fe *FsEvent) Update() *FsEventUpdateOne {
    return NewFsEventClient(fe.config).UpdateOne(fe)
}

// Unwrap unwraps the FsEvent entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (fe *FsEvent) Unwrap() *FsEvent {
    _tx, ok := fe.config.driver.(*txDriver)
    if !ok {
        panic("ent: FsEvent is not a transactional entity")
    }
    fe.config.driver = _tx.drv
    return fe
}

// String implements the fmt.Stringer.
func (fe *FsEvent) String() string {
    var builder strings.Builder
    builder.WriteString("FsEvent(")
    builder.WriteString(fmt.Sprintf("id=%v, ", fe.ID))
    builder.WriteString("created_at=")
|
||||
builder.WriteString(fe.CreatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("updated_at=")
|
||||
builder.WriteString(fe.UpdatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
if v := fe.DeletedAt; v != nil {
|
||||
builder.WriteString("deleted_at=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("event=")
|
||||
builder.WriteString(fe.Event)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("subscriber=")
|
||||
builder.WriteString(fmt.Sprintf("%v", fe.Subscriber))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("user_fsevent=")
|
||||
builder.WriteString(fmt.Sprintf("%v", fe.UserFsevent))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// SetUser manually set the edge as loaded state.
|
||||
func (e *FsEvent) SetUser(v *User) {
|
||||
e.Edges.User = v
|
||||
e.Edges.loadedTypes[0] = true
|
||||
}
|
||||
|
||||
// FsEvents is a parsable slice of FsEvent.
|
||||
type FsEvents []*FsEvent
|
||||
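The UserOrErr helper above distinguishes an edge that was never requested from one that was requested but has no matching row. A minimal usage sketch under stated assumptions — the helper name listFsEvents and the surrounding client/context are illustrative, not part of this diff:

package example // hypothetical usage sketch, not part of this diff

import (
	"context"
	"fmt"

	"github.com/cloudreve/Cloudreve/v4/ent"
)

// listFsEvents shows how the generated query builder cooperates with
// UserOrErr: WithUser eager-loads the edge, which populates Edges.User
// and loadedTypes[0] as seen in the model above.
func listFsEvents(ctx context.Context, client *ent.Client) error {
	events, err := client.FsEvent.Query().
		WithUser(). // eager-load the "user" edge
		All(ctx)
	if err != nil {
		return err
	}
	for _, e := range events {
		u, err := e.Edges.UserOrErr()
		if err != nil {
			// Either NotLoadedError (edge not requested) or
			// NotFoundError (requested but the user row is missing).
			continue
		}
		fmt.Printf("event %q is subscribed by user %d\n", e.Event, u.ID)
	}
	return nil
}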
130
ent/fsevent/fsevent.go
Normal file
@@ -0,0 +1,130 @@
// Code generated by ent, DO NOT EDIT.

package fsevent

import (
	"time"

	"entgo.io/ent"
	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
)

const (
	// Label holds the string label denoting the fsevent type in the database.
	Label = "fs_event"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldEvent holds the string denoting the event field in the database.
	FieldEvent = "event"
	// FieldSubscriber holds the string denoting the subscriber field in the database.
	FieldSubscriber = "subscriber"
	// FieldUserFsevent holds the string denoting the user_fsevent field in the database.
	FieldUserFsevent = "user_fsevent"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// Table holds the table name of the fsevent in the database.
	Table = "fs_events"
	// UserTable is the table that holds the user relation/edge.
	UserTable = "fs_events"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "user_fsevent"
)

// Columns holds all SQL columns for fsevent fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldEvent,
	FieldSubscriber,
	FieldUserFsevent,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}

// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main package as follows:
//
//	import _ "github.com/cloudreve/Cloudreve/v4/ent/runtime"
var (
	// Hooks holds the runtime hooks registered for the FsEvent type.
	Hooks [1]ent.Hook
	// Interceptors holds the runtime query interceptors registered for the FsEvent type.
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)

// OrderOption defines the ordering options for the FsEvent queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}

// ByEvent orders the results by the event field.
func ByEvent(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldEvent, opts...).ToFunc()
}

// BySubscriber orders the results by the subscriber field.
func BySubscriber(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSubscriber, opts...).ToFunc()
}

// ByUserFsevent orders the results by the user_fsevent field.
func ByUserFsevent(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUserFsevent, opts...).ToFunc()
}

// ByUserField orders the results by user field.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
	}
}

// newUserStep builds the sqlgraph step for traversing the M2O "user" edge.
func newUserStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UserInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
	)
}
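The OrderOption values above are plain functions over *sql.Selector, so they compose directly with the generated query builder; ByUserField additionally joins through the M2O step built by newUserStep. A minimal sketch under the same assumptions as the previous example (the helper name recentFsEvents is illustrative, not part of this diff):

package example // hypothetical usage sketch, not part of this diff

import (
	"context"

	"entgo.io/ent/dialect/sql"

	"github.com/cloudreve/Cloudreve/v4/ent"
	"github.com/cloudreve/Cloudreve/v4/ent/fsevent"
)

// recentFsEvents returns the 20 newest events, ordered with the generated
// OrderOption helpers (ORDER BY created_at DESC under the hood).
func recentFsEvents(ctx context.Context, client *ent.Client) ([]*ent.FsEvent, error) {
	return client.FsEvent.Query().
		Order(fsevent.ByCreatedAt(sql.OrderDesc())).
		Limit(20).
		All(ctx)
}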
Some files were not shown because too many files have changed in this diff.