initial (commit 7634b181de)

.gitignore (vendored, new file, +1)
@@ -0,0 +1 @@
.idea
LICENSE (new file, +202)
@@ -0,0 +1,202 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2023 Darko Luketic
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
README.md (new file, +63)
@@ -0,0 +1,63 @@
# gomanager

## description

Go Manager is a program that watches your package binaries: if a binary was not built with the Go version you currently have installed, it rebuilds the package.

All commands **must** be run as root.

## install
```bash
go install code.icod.de/dalu/gomanager@latest
mv ~/go/bin/gomanager /usr/local/bin/
gomanager setup
```

## usage

### add project
```bash
gomanager add \
  --root-path="" \
  --binary-path="" \
  --service-name="" \
  --user="" \
  --group=""
```

### remove project
```bash
gomanager remove --id=<ID>
```

### start/run
```bash
gomanager start
```

### systemd service
See the contrib directory. The runner is run every 24 hours.

### cron job
This runs the watcher every day at 2 am:
```bash
crontab -e
```
```cronexp
0 2 * * * /usr/local/bin/gomanager cron
```

### systemd timer
See:

- contrib/gomanager-cron.service
- contrib/gomanager-cron.timer

Copy them to `/etc/systemd/system/` and run:
```bash
systemctl enable --now gomanager-cron.timer
```

## uninstall
```bash
gomanager clean
rm /usr/local/bin/gomanager
```
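At its core, the check gomanager is described as performing compares the Go version recorded in a binary with the currently installed toolchain. A minimal sketch of that comparison (not gomanager's actual code; the program name `gostale` is made up) could look like this:

```go
// gostale: minimal sketch of the staleness check described in the README;
// not part of this commit.
package main

import (
	"debug/buildinfo"
	"fmt"
	"os"
	"runtime"
)

func main() {
	info, err := buildinfo.ReadFile(os.Args[1]) // path to a Go binary
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// runtime.Version() is the toolchain this checker was built with; the
	// real watcher may instead ask `go env GOVERSION` for the installed one.
	fmt.Printf("built with %s, current %s, stale: %v\n",
		info.GoVersion, runtime.Version(), info.GoVersion != runtime.Version())
}
```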
cmd/add.go (new file, +120)
@@ -0,0 +1,120 @@
package cmd

import (
	"context"
	"errors"
	"fmt"
	"runtime"

	"code.icod.de/dalu/gomanager/ent"
	"github.com/spf13/cobra"

	_ "github.com/mattn/go-sqlite3"
)

var (
	addUser             = ""
	addGroup            = ""
	addRootPath         = ""
	addBinaryPath       = ""
	addServiceName      = ""
	addBinaryTargetPath = ""
)

// addCmd represents the add command
var addCmd = &cobra.Command{
	Use:   "add",
	Short: "add a project for watching",
	Long: `add a project for watching

Example:
A project residing in /var/www/dalu/project,
with a binary at /var/www/dalu/project/project,
a systemd service named dalu-project.service,
and the binary owned by dalu:dalu:

gomanager add \
--root-path="/var/www/dalu/project" \
--binary-path="project" \
--service-name="dalu-project.service" \
--user="dalu" \
--group="dalu"

Let's assume we'd like the resulting binary to be moved to /usr/local/bin/project after building;
then we add
--binary-target-path="/usr/local/bin/project"

Or, with short notation:
gomanager add \
-r="/var/www/dalu/project" \
-b="project" \
-s="dalu-project.service" \
-u="dalu" \
-g="dalu"

and, to move the compiled binary,
-t="/usr/local/bin/project"
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		runtime.GOMAXPROCS(1)
		client, err := ent.Open("sqlite3", fmt.Sprintf("file:%s?mode=rwc&cache=private&_fk=1", sqliteFilename))
		if err != nil {
			return fmt.Errorf("failed opening connection to sqlite: %v", err)
		}
		defer client.Close()

		if addUser == "" {
			return errors.New("user is empty")
		}
		if addGroup == "" {
			return errors.New("group is empty")
		}
		if addRootPath == "" {
			return errors.New("root-path is empty")
		}
		if addBinaryPath == "" {
			return errors.New("binary-path is empty")
		}
		if addServiceName == "" {
			return errors.New("service-name is empty")
		}

		p := client.Project.
			Create().
			SetUser(addUser).
			SetGroup(addGroup).
			SetRootPath(addRootPath).
			SetBinaryPath(addBinaryPath).
			SetServiceName(addServiceName)

		if addBinaryTargetPath != "" {
			p = p.SetMoveToTarget(true).SetBinaryTargetPath(addBinaryTargetPath)
		}
		project, e := p.Save(context.Background())
		if e != nil {
			return e
		}
		fmt.Printf("Saved project with ID: %d\n", project.ID)
		return nil
	},
}

func init() {
	rootCmd.AddCommand(addCmd)

	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// addCmd.PersistentFlags().String("foo", "", "A help for foo")

	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// addCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
	addCmd.Flags().StringVarP(&addRootPath, "root-path", "r", "", "/var/www/code.icod.de/goroot")
	addCmd.Flags().StringVarP(&addServiceName, "service-name", "s", "", "mysystemd.service")
	addCmd.Flags().StringVarP(&addBinaryPath, "binary-path", "b", "", "gomanager or cmd/run/gomanager")
	addCmd.Flags().StringVarP(&addBinaryTargetPath, "binary-target-path", "t", "", "../binaries/gomanager-binary")
	addCmd.Flags().StringVarP(&addUser, "user", "u", "nginx", "www-data")
	addCmd.Flags().StringVarP(&addGroup, "group", "g", "nginx", "www-data")
}
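The ent schema package is not included in this excerpt. From the setters used above (SetUser, SetGroup, SetRootPath, SetBinaryPath, SetServiceName, SetMoveToTarget, SetBinaryTargetPath) and the logentries edge referenced by the generated client, the Project schema plausibly looks like the following inferred sketch; field names and options are assumptions, not the committed ent/schema/project.go:

```go
// Inferred sketch of ent/schema/project.go, derived from cmd/add.go;
// Logentry is the sibling schema in the same package.
package schema

import (
	"entgo.io/ent"
	"entgo.io/ent/schema/edge"
	"entgo.io/ent/schema/field"
)

type Project struct {
	ent.Schema
}

func (Project) Fields() []ent.Field {
	return []ent.Field{
		field.String("user"),
		field.String("group"),
		field.String("root_path"),
		field.String("binary_path"),
		field.String("service_name"),
		field.Bool("move_to_target").Default(false),
		field.String("binary_target_path").Optional(),
	}
}

func (Project) Edges() []ent.Edge {
	return []ent.Edge{
		edge.To("logentries", Logentry.Type),
	}
}
```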
cmd/clean.go (new file, +35)
@@ -0,0 +1,35 @@
package cmd

import (
	"os"
	"runtime"

	"github.com/spf13/cobra"
)

// cleanCmd represents the clean command
var cleanCmd = &cobra.Command{
	Use:   "clean",
	Short: "Cleans the gomanager database",
	RunE: func(cmd *cobra.Command, args []string) error {
		runtime.GOMAXPROCS(1)
		if e := os.Remove(sqliteFilename); e != nil {
			return e
		}
		return os.Remove("/var/lib/gomanager")
	},
}

func init() {
	rootCmd.AddCommand(cleanCmd)

	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// cleanCmd.PersistentFlags().String("foo", "", "A help for foo")

	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// cleanCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
cmd/cron.go (new file, +42)
@@ -0,0 +1,42 @@
package cmd

import (
	"fmt"
	"runtime"

	"code.icod.de/dalu/gomanager/ent"
	"code.icod.de/dalu/gomanager/runner"
	"github.com/spf13/cobra"

	_ "github.com/mattn/go-sqlite3"
)

// cronCmd represents the cron command
var cronCmd = &cobra.Command{
	Use:   "cron",
	Short: "this command is meant to be run via cron",
	RunE: func(cmd *cobra.Command, args []string) error {
		runtime.GOMAXPROCS(1)
		client, err := ent.Open("sqlite3", fmt.Sprintf("file:%s?mode=rwc&cache=private&_fk=1", sqliteFilename))
		if err != nil {
			return fmt.Errorf("failed opening connection to sqlite: %v", err)
		}
		defer client.Close()
		r := runner.NewRunner(client)
		return r.Run()
	},
}

func init() {
	rootCmd.AddCommand(cronCmd)

	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// cronCmd.PersistentFlags().String("foo", "", "A help for foo")

	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// cronCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
cmd/list.go (new file, +81)
@@ -0,0 +1,81 @@
package cmd

import (
	"context"
	"fmt"
	"runtime"

	"code.icod.de/dalu/gomanager/ent"
	"github.com/jedib0t/go-pretty/v6/table"
	"github.com/spf13/cobra"

	_ "github.com/mattn/go-sqlite3"
)

var (
	colTitleIndex            = "ID"
	colTitleRootPath         = "Root Path"
	colTitleBinaryPath       = "Binary"
	colTitleServiceName      = "Service Name"
	colTitleUser             = "User"
	colTitleGroup            = "Group"
	colTitleBinaryTargetPath = "Target"
	rowHeader                = table.Row{
		colTitleIndex,
		colTitleRootPath,
		colTitleBinaryPath,
		colTitleServiceName,
		colTitleUser,
		colTitleGroup,
		colTitleBinaryTargetPath,
	}
)

// listCmd represents the list command
var listCmd = &cobra.Command{
	Use:   "list",
	Short: "Lists all watched projects",
	RunE: func(cmd *cobra.Command, args []string) error {
		runtime.GOMAXPROCS(1)
		client, err := ent.Open("sqlite3", fmt.Sprintf("file:%s?mode=rwc&cache=private&_fk=1", sqliteFilename))
		if err != nil {
			return fmt.Errorf("failed opening connection to sqlite: %v", err)
		}
		defer client.Close()
		ms, e := client.Project.Query().All(context.Background())
		if e != nil {
			return e
		}
		tw := table.NewWriter()
		tw.AppendHeader(rowHeader)
		for _, m := range ms {
			tw.AppendRow(table.Row{
				m.ID,
				m.RootPath,
				m.BinaryPath,
				m.ServiceName,
				m.User,
				m.Group,
				m.BinaryTargetPath,
			})
		}
		tw.SetIndexColumn(1)
		tw.SetTitle("Watched Projects")
		fmt.Println(tw.Render())
		return nil
	},
}

func init() {
	rootCmd.AddCommand(listCmd)

	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// listCmd.PersistentFlags().String("foo", "", "A help for foo")

	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// listCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
cmd/logs.go (new file, +59)
@@ -0,0 +1,59 @@
package cmd

import (
	"context"
	"fmt"
	"runtime"

	"code.icod.de/dalu/gomanager/ent"
	"code.icod.de/dalu/gomanager/ent/logentry"
	"code.icod.de/dalu/gomanager/ent/project"
	"entgo.io/ent/dialect/sql"
	"github.com/spf13/cobra"

	_ "github.com/mattn/go-sqlite3"
)

var (
	showLogID = -1
)

// logsCmd represents the logs command
var logsCmd = &cobra.Command{
	Use:   "logs",
	Short: "Show logs of a project",
	RunE: func(cmd *cobra.Command, args []string) error {
		runtime.GOMAXPROCS(1)
		client, err := ent.Open("sqlite3", fmt.Sprintf("file:%s?mode=rwc&cache=private&_fk=1", sqliteFilename))
		if err != nil {
			return fmt.Errorf("failed opening connection to sqlite: %v", err)
		}
		defer client.Close()
		ms, e := client.Project.Query().Where(project.ID(showLogID)).
			QueryLogentries().
			Order(logentry.ByDate(sql.OrderDesc())).
			All(context.Background())
		if e != nil {
			return e
		}
		for _, m := range ms {
			fmt.Println(m.Date, m.Content)
		}
		return nil
	},
}

func init() {
	rootCmd.AddCommand(logsCmd)

	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// logsCmd.PersistentFlags().String("foo", "", "A help for foo")

	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// logsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
	logsCmd.Flags().IntVar(&showLogID, "id", -1, "1")
}
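Likewise, the Logentry schema is not part of this excerpt; from the m.Date and m.Content fields read above and the project edge used by the generated client, it plausibly looks like this inferred sketch (same caveats as the Project sketch above, defaults are assumptions):

```go
// Inferred sketch of ent/schema/logentry.go; names follow what cmd/logs.go
// and the generated client reference.
package schema

import (
	"time"

	"entgo.io/ent"
	"entgo.io/ent/schema/edge"
	"entgo.io/ent/schema/field"
)

type Logentry struct {
	ent.Schema
}

func (Logentry) Fields() []ent.Field {
	return []ent.Field{
		field.Time("date").Default(time.Now),
		field.String("content"),
	}
}

func (Logentry) Edges() []ent.Edge {
	return []ent.Edge{
		edge.From("project", Project.Type).Ref("logentries").Unique(),
	}
}
```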
cmd/remove.go (new file, +70)
@@ -0,0 +1,70 @@
package cmd

import (
	"context"
	"errors"
	"fmt"
	"runtime"

	"code.icod.de/dalu/gomanager/ent"
	"github.com/spf13/cobra"

	_ "github.com/mattn/go-sqlite3"
)

var (
	removeID = -1
)

// removeCmd represents the remove command
var removeCmd = &cobra.Command{
	Use:   "remove",
	Short: "Remove a job with an ID",
	Long: `Remove a job with an ID

Use gomanager list to find the project's (or job's) ID.
Example:
sudo ./gomanager list
+---------------------------------------------------------------------------------------------------------------------+
| Watched Projects                                                                                                      |
+----+---------------------------------------------------+-------------+---------------------+-------+-------+--------+
| ID | ROOT PATH                                         | BINARY      | SERVICE NAME        | USER  | GROUP | TARGET |
+----+---------------------------------------------------+-------------+---------------------+-------+-------+--------+
|  1 | /home/darko/go/src/code.icod.de/dalu/simpleforum/ | simpleforum | simpleforum.service | nginx | nginx |        |
|  2 | /home/darko/go/src/code.icod.de/dalu/affx/        | affx        | affx.service        | nginx | nginx |        |
+----+---------------------------------------------------+-------------+---------------------+-------+-------+--------+

Then use
gomanager remove --id=1
to remove the simpleforum project.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		runtime.GOMAXPROCS(1)
		client, err := ent.Open("sqlite3", fmt.Sprintf("file:%s?mode=rwc&cache=private&_fk=1", sqliteFilename))
		if err != nil {
			return fmt.Errorf("failed opening connection to sqlite: %v", err)
		}
		defer client.Close()
		if removeID == -1 {
			return errors.New("the ID is not set. Use --id=<number> to remove the job with that ID")
		}
		return client.Project.DeleteOneID(removeID).Exec(context.Background())
	},
}

func init() {
	rootCmd.AddCommand(removeCmd)

	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// removeCmd.PersistentFlags().String("foo", "", "A help for foo")

	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// removeCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
	removeCmd.Flags().IntVar(&removeID, "id", -1, "1")
}
cmd/root.go (new file, +39)
@@ -0,0 +1,39 @@
package cmd

import (
	"os"

	"github.com/spf13/cobra"
)

const sqliteFilename = "/var/lib/gomanager/gomanager.dat"

// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
	Use:   "gomanager",
	Short: "gomanager: code.icod.de/dalu/gomanager",
	// Uncomment the following line if your bare application
	// has an action associated with it:
	// Run: func(cmd *cobra.Command, args []string) { },
}

// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
	err := rootCmd.Execute()
	if err != nil {
		os.Exit(1)
	}
}

func init() {
	// Here you will define your flags and configuration settings.
	// Cobra supports persistent flags, which, if defined here,
	// will be global for your application.

	// rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.gomanager.yaml)")

	// Cobra also supports local flags, which will only run
	// when this action is called directly.
	rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
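Execute is documented as being called from main.main(). main.go is not part of this excerpt, but for a cobra application it is presumably the usual one-liner:

```go
// Presumed entry point (not shown in this commit excerpt).
package main

import "code.icod.de/dalu/gomanager/cmd"

func main() {
	cmd.Execute()
}
```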
cmd/setup.go (new file, +55)
@@ -0,0 +1,55 @@
package cmd

import (
	"context"
	"errors"
	"fmt"
	"os"

	"code.icod.de/dalu/gomanager/ent"
	"github.com/spf13/cobra"

	_ "github.com/mattn/go-sqlite3"
)

// setupCmd represents the setup command
var setupCmd = &cobra.Command{
	Use:   "setup",
	Short: "sets up the program",
	RunE: func(cmd *cobra.Command, args []string) error {
		if os.Getuid() != 0 {
			return errors.New("this program must be run as root")
		}
		_, e := os.Stat("/var/lib/gomanager")
		if errors.Is(e, os.ErrNotExist) {
			err := os.MkdirAll("/var/lib/gomanager", 0777)
			if err != nil {
				return err
			}
		}
		client, err := ent.Open("sqlite3", fmt.Sprintf("file:%s?mode=rwc&cache=private&_fk=1", sqliteFilename))
		if err != nil {
			return fmt.Errorf("failed opening connection to sqlite: %v", err)
		}
		defer client.Close()
		// Run the auto migration tool.
		if err := client.Schema.Create(context.Background()); err != nil {
			return fmt.Errorf("failed creating schema resources: %v", err)
		}
		return nil
	},
}

func init() {
	rootCmd.AddCommand(setupCmd)

	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// setupCmd.PersistentFlags().String("foo", "", "A help for foo")

	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// setupCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
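client.Schema.Create runs ent's automatic migration with default options. If the schema evolves later, ent's generated migrate package also accepts options such as dropping removed columns and indexes; whether gomanager wants them is an assumption, so the following is only a sketch:

```go
// Sketch: setup with optional ent migration flags (assumed, not in the committed setup.go).
package cmd

import (
	"context"
	"fmt"

	"code.icod.de/dalu/gomanager/ent"
	"code.icod.de/dalu/gomanager/ent/migrate"
)

func migrateSchema(client *ent.Client) error {
	err := client.Schema.Create(
		context.Background(),
		migrate.WithDropIndex(true),  // drop indexes that were removed from the schema
		migrate.WithDropColumn(true), // drop columns that were removed from the schema
	)
	if err != nil {
		return fmt.Errorf("failed creating schema resources: %v", err)
	}
	return nil
}
```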
cmd/start.go (new file, +51)
@@ -0,0 +1,51 @@
package cmd

import (
	"fmt"
	"runtime"
	"time"

	"code.icod.de/dalu/gomanager/ent"
	"code.icod.de/dalu/gomanager/runner"
	"github.com/spf13/cobra"

	_ "github.com/mattn/go-sqlite3"
)

// startCmd represents the start command
var startCmd = &cobra.Command{
	Use:   "start",
	Short: "starts the autonomous watcher mode, which checks projects every 24h",
	RunE: func(cmd *cobra.Command, args []string) error {
		runtime.GOMAXPROCS(1)
		client, err := ent.Open("sqlite3", fmt.Sprintf("file:%s?mode=rwc&cache=private&_fk=1", sqliteFilename))
		if err != nil {
			return fmt.Errorf("failed opening connection to sqlite: %v", err)
		}
		defer client.Close()
		r := runner.NewRunner(client)
		t := time.NewTicker(time.Hour * 24)
		for {
			select {
			case <-t.C:
				if e := r.Run(); e != nil {
					return e
				}
			}
		}
	},
}

func init() {
	rootCmd.AddCommand(startCmd)

	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// startCmd.PersistentFlags().String("foo", "", "A help for foo")

	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// startCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
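runner.NewRunner and Run are referenced by cmd/start.go and cmd/cron.go, but the runner package itself is not part of this excerpt. Based on the README (rebuild a project when its binary was built with an older Go version, then restart its service), a plausible shape is sketched below; everything except the NewRunner/Run signatures and the Project fields is an assumption:

```go
// Hypothetical sketch of the runner package; only NewRunner and Run are
// known from this commit, the rest is illustrative.
package runner

import (
	"context"
	"debug/buildinfo"
	"fmt"
	"os/exec"
	"path/filepath"
	"strings"

	"code.icod.de/dalu/gomanager/ent"
)

type Runner struct {
	client *ent.Client
}

func NewRunner(client *ent.Client) *Runner {
	return &Runner{client: client}
}

// Run compares every stored project binary against the installed Go
// toolchain and rebuilds the ones that are out of date.
func (r *Runner) Run() error {
	ctx := context.Background()
	out, err := exec.Command("go", "env", "GOVERSION").Output()
	if err != nil {
		return fmt.Errorf("detect installed go version: %v", err)
	}
	installed := strings.TrimSpace(string(out))

	projects, err := r.client.Project.Query().All(ctx)
	if err != nil {
		return err
	}
	for _, p := range projects {
		bin := filepath.Join(p.RootPath, p.BinaryPath)
		info, err := buildinfo.ReadFile(bin)
		if err != nil {
			return err
		}
		if info.GoVersion == installed {
			continue // already built with the current toolchain
		}
		build := exec.Command("go", "build", "-o", bin, ".")
		build.Dir = p.RootPath
		if msg, err := build.CombinedOutput(); err != nil {
			return fmt.Errorf("rebuild %s: %v: %s", p.RootPath, err, msg)
		}
		// The real runner presumably also chowns the binary to p.User:p.Group,
		// honors BinaryTargetPath/MoveToTarget, restarts p.ServiceName and
		// writes a Logentry; those steps are omitted in this sketch.
	}
	return nil
}
```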
contrib/gomanager-cron.service (new file, +13)
@@ -0,0 +1,13 @@
[Unit]
Description=Go Manager Cron Service

[Service]
Type=oneshot
User=root
Group=root
ExecStart=/usr/local/bin/gomanager cron
WorkingDirectory=/var/lib/gomanager
PrivateTmp=true

[Install]
WantedBy=multi-user.target
contrib/gomanager-cron.timer (new file, +9)
@@ -0,0 +1,9 @@
[Unit]
Description=Run gomanager every day at 2am

[Timer]
OnCalendar=*-*-* 02:00:00 Europe/Berlin
Persistent=true

[Install]
WantedBy=timers.target
contrib/gomanager.service (new file, +14)
@@ -0,0 +1,14 @@
[Unit]
Description=Go Manager

[Service]
Type=simple
User=root
Group=root
ExecStart=/usr/local/bin/gomanager start
WorkingDirectory=/var/lib/gomanager
Restart=always
PrivateTmp=true

[Install]
WantedBy=multi-user.target
ent/client.go (new file, +516)
@@ -0,0 +1,516 @@
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/migrate"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/logentry"
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
// Client is the client that holds all ent builders.
|
||||
type Client struct {
|
||||
config
|
||||
// Schema is the client for creating, migrating and dropping schema.
|
||||
Schema *migrate.Schema
|
||||
// Logentry is the client for interacting with the Logentry builders.
|
||||
Logentry *LogentryClient
|
||||
// Project is the client for interacting with the Project builders.
|
||||
Project *ProjectClient
|
||||
}
|
||||
|
||||
// NewClient creates a new client configured with the given options.
|
||||
func NewClient(opts ...Option) *Client {
|
||||
client := &Client{config: newConfig(opts...)}
|
||||
client.init()
|
||||
return client
|
||||
}
|
||||
|
||||
func (c *Client) init() {
|
||||
c.Schema = migrate.NewSchema(c.driver)
|
||||
c.Logentry = NewLogentryClient(c.config)
|
||||
c.Project = NewProjectClient(c.config)
|
||||
}
|
||||
|
||||
type (
|
||||
// config is the configuration for the client and its builder.
|
||||
config struct {
|
||||
// driver used for executing database requests.
|
||||
driver dialect.Driver
|
||||
// debug enable a debug logging.
|
||||
debug bool
|
||||
// log used for logging on debug mode.
|
||||
log func(...any)
|
||||
// hooks to execute on mutations.
|
||||
hooks *hooks
|
||||
// interceptors to execute on queries.
|
||||
inters *inters
|
||||
}
|
||||
// Option function to configure the client.
|
||||
Option func(*config)
|
||||
)
|
||||
|
||||
// newConfig creates a new config for the client.
|
||||
func newConfig(opts ...Option) config {
|
||||
cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
|
||||
cfg.options(opts...)
|
||||
return cfg
|
||||
}
|
||||
|
||||
// options applies the options on the config object.
|
||||
func (c *config) options(opts ...Option) {
|
||||
for _, opt := range opts {
|
||||
opt(c)
|
||||
}
|
||||
if c.debug {
|
||||
c.driver = dialect.Debug(c.driver, c.log)
|
||||
}
|
||||
}
|
||||
|
||||
// Debug enables debug logging on the ent.Driver.
|
||||
func Debug() Option {
|
||||
return func(c *config) {
|
||||
c.debug = true
|
||||
}
|
||||
}
|
||||
|
||||
// Log sets the logging function for debug mode.
|
||||
func Log(fn func(...any)) Option {
|
||||
return func(c *config) {
|
||||
c.log = fn
|
||||
}
|
||||
}
|
||||
|
||||
// Driver configures the client driver.
|
||||
func Driver(driver dialect.Driver) Option {
|
||||
return func(c *config) {
|
||||
c.driver = driver
|
||||
}
|
||||
}
|
||||
|
||||
// Open opens a database/sql.DB specified by the driver name and
|
||||
// the data source name, and returns a new client attached to it.
|
||||
// Optional parameters can be added for configuring the client.
|
||||
func Open(driverName, dataSourceName string, options ...Option) (*Client, error) {
|
||||
switch driverName {
|
||||
case dialect.MySQL, dialect.Postgres, dialect.SQLite:
|
||||
drv, err := sql.Open(driverName, dataSourceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewClient(append(options, Driver(drv))...), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported driver: %q", driverName)
|
||||
}
|
||||
}
|
||||
|
||||
// ErrTxStarted is returned when trying to start a new transaction from a transactional client.
|
||||
var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction")
|
||||
|
||||
// Tx returns a new transactional client. The provided context
|
||||
// is used until the transaction is committed or rolled back.
|
||||
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
|
||||
if _, ok := c.driver.(*txDriver); ok {
|
||||
return nil, ErrTxStarted
|
||||
}
|
||||
tx, err := newTx(ctx, c.driver)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ent: starting a transaction: %w", err)
|
||||
}
|
||||
cfg := c.config
|
||||
cfg.driver = tx
|
||||
return &Tx{
|
||||
ctx: ctx,
|
||||
config: cfg,
|
||||
Logentry: NewLogentryClient(cfg),
|
||||
Project: NewProjectClient(cfg),
|
||||
}, nil
|
||||
}
|
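// Example (editor's sketch, not generated code): using the transactional
// client returned by Tx. Commit and Rollback come from the generated Tx type;
// the field values are illustrative only.
//
//	tx, err := client.Tx(ctx)
//	if err != nil {
//		return err
//	}
//	_, err = tx.Project.Create().
//		SetUser("nginx").
//		SetGroup("nginx").
//		SetRootPath("/var/www/example").
//		SetBinaryPath("example").
//		SetServiceName("example.service").
//		Save(ctx)
//	if err != nil {
//		_ = tx.Rollback()
//		return err
//	}
//	return tx.Commit()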
||||
|
||||
// BeginTx returns a transactional client with specified options.
|
||||
func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
|
||||
if _, ok := c.driver.(*txDriver); ok {
|
||||
return nil, errors.New("ent: cannot start a transaction within a transaction")
|
||||
}
|
||||
tx, err := c.driver.(interface {
|
||||
BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error)
|
||||
}).BeginTx(ctx, opts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ent: starting a transaction: %w", err)
|
||||
}
|
||||
cfg := c.config
|
||||
cfg.driver = &txDriver{tx: tx, drv: c.driver}
|
||||
return &Tx{
|
||||
ctx: ctx,
|
||||
config: cfg,
|
||||
Logentry: NewLogentryClient(cfg),
|
||||
Project: NewProjectClient(cfg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Debug returns a new debug-client. It's used to get verbose logging on specific operations.
|
||||
//
|
||||
// client.Debug().
|
||||
// Logentry.
|
||||
// Query().
|
||||
// Count(ctx)
|
||||
func (c *Client) Debug() *Client {
|
||||
if c.debug {
|
||||
return c
|
||||
}
|
||||
cfg := c.config
|
||||
cfg.driver = dialect.Debug(c.driver, c.log)
|
||||
client := &Client{config: cfg}
|
||||
client.init()
|
||||
return client
|
||||
}
|
||||
|
||||
// Close closes the database connection and prevents new queries from starting.
|
||||
func (c *Client) Close() error {
|
||||
return c.driver.Close()
|
||||
}
|
||||
|
||||
// Use adds the mutation hooks to all the entity clients.
|
||||
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
|
||||
func (c *Client) Use(hooks ...Hook) {
|
||||
c.Logentry.Use(hooks...)
|
||||
c.Project.Use(hooks...)
|
||||
}
|
||||
|
||||
// Intercept adds the query interceptors to all the entity clients.
|
||||
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
|
||||
func (c *Client) Intercept(interceptors ...Interceptor) {
|
||||
c.Logentry.Intercept(interceptors...)
|
||||
c.Project.Intercept(interceptors...)
|
||||
}
|
||||
|
||||
// Mutate implements the ent.Mutator interface.
|
||||
func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
|
||||
switch m := m.(type) {
|
||||
case *LogentryMutation:
|
||||
return c.Logentry.mutate(ctx, m)
|
||||
case *ProjectMutation:
|
||||
return c.Project.mutate(ctx, m)
|
||||
default:
|
||||
return nil, fmt.Errorf("ent: unknown mutation type %T", m)
|
||||
}
|
||||
}
|
||||
|
||||
// LogentryClient is a client for the Logentry schema.
|
||||
type LogentryClient struct {
|
||||
config
|
||||
}
|
||||
|
||||
// NewLogentryClient returns a client for the Logentry from the given config.
|
||||
func NewLogentryClient(c config) *LogentryClient {
|
||||
return &LogentryClient{config: c}
|
||||
}
|
||||
|
||||
// Use adds a list of mutation hooks to the hooks stack.
|
||||
// A call to `Use(f, g, h)` equals to `logentry.Hooks(f(g(h())))`.
|
||||
func (c *LogentryClient) Use(hooks ...Hook) {
|
||||
c.hooks.Logentry = append(c.hooks.Logentry, hooks...)
|
||||
}
|
||||
|
||||
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||
// A call to `Intercept(f, g, h)` equals to `logentry.Intercept(f(g(h())))`.
|
||||
func (c *LogentryClient) Intercept(interceptors ...Interceptor) {
|
||||
c.inters.Logentry = append(c.inters.Logentry, interceptors...)
|
||||
}
|
||||
|
||||
// Create returns a builder for creating a Logentry entity.
|
||||
func (c *LogentryClient) Create() *LogentryCreate {
|
||||
mutation := newLogentryMutation(c.config, OpCreate)
|
||||
return &LogentryCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// CreateBulk returns a builder for creating a bulk of Logentry entities.
|
||||
func (c *LogentryClient) CreateBulk(builders ...*LogentryCreate) *LogentryCreateBulk {
|
||||
return &LogentryCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *LogentryClient) MapCreateBulk(slice any, setFunc func(*LogentryCreate, int)) *LogentryCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &LogentryCreateBulk{err: fmt.Errorf("calling to LogentryClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*LogentryCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &LogentryCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for Logentry.
|
||||
func (c *LogentryClient) Update() *LogentryUpdate {
|
||||
mutation := newLogentryMutation(c.config, OpUpdate)
|
||||
return &LogentryUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOne returns an update builder for the given entity.
|
||||
func (c *LogentryClient) UpdateOne(l *Logentry) *LogentryUpdateOne {
|
||||
mutation := newLogentryMutation(c.config, OpUpdateOne, withLogentry(l))
|
||||
return &LogentryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOneID returns an update builder for the given id.
|
||||
func (c *LogentryClient) UpdateOneID(id int) *LogentryUpdateOne {
|
||||
mutation := newLogentryMutation(c.config, OpUpdateOne, withLogentryID(id))
|
||||
return &LogentryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// Delete returns a delete builder for Logentry.
|
||||
func (c *LogentryClient) Delete() *LogentryDelete {
|
||||
mutation := newLogentryMutation(c.config, OpDelete)
|
||||
return &LogentryDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// DeleteOne returns a builder for deleting the given entity.
|
||||
func (c *LogentryClient) DeleteOne(l *Logentry) *LogentryDeleteOne {
|
||||
return c.DeleteOneID(l.ID)
|
||||
}
|
||||
|
||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||
func (c *LogentryClient) DeleteOneID(id int) *LogentryDeleteOne {
|
||||
builder := c.Delete().Where(logentry.ID(id))
|
||||
builder.mutation.id = &id
|
||||
builder.mutation.op = OpDeleteOne
|
||||
return &LogentryDeleteOne{builder}
|
||||
}
|
||||
|
||||
// Query returns a query builder for Logentry.
|
||||
func (c *LogentryClient) Query() *LogentryQuery {
|
||||
return &LogentryQuery{
|
||||
config: c.config,
|
||||
ctx: &QueryContext{Type: TypeLogentry},
|
||||
inters: c.Interceptors(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a Logentry entity by its id.
|
||||
func (c *LogentryClient) Get(ctx context.Context, id int) (*Logentry, error) {
|
||||
return c.Query().Where(logentry.ID(id)).Only(ctx)
|
||||
}
|
||||
|
||||
// GetX is like Get, but panics if an error occurs.
|
||||
func (c *LogentryClient) GetX(ctx context.Context, id int) *Logentry {
|
||||
obj, err := c.Get(ctx, id)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
// QueryProject queries the project edge of a Logentry.
|
||||
func (c *LogentryClient) QueryProject(l *Logentry) *ProjectQuery {
|
||||
query := (&ProjectClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := l.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(logentry.Table, logentry.FieldID, id),
|
||||
sqlgraph.To(project.Table, project.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, logentry.ProjectTable, logentry.ProjectColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(l.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// Hooks returns the client hooks.
|
||||
func (c *LogentryClient) Hooks() []Hook {
|
||||
return c.hooks.Logentry
|
||||
}
|
||||
|
||||
// Interceptors returns the client interceptors.
|
||||
func (c *LogentryClient) Interceptors() []Interceptor {
|
||||
return c.inters.Logentry
|
||||
}
|
||||
|
||||
func (c *LogentryClient) mutate(ctx context.Context, m *LogentryMutation) (Value, error) {
|
||||
switch m.Op() {
|
||||
case OpCreate:
|
||||
return (&LogentryCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdate:
|
||||
return (&LogentryUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdateOne:
|
||||
return (&LogentryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpDelete, OpDeleteOne:
|
||||
return (&LogentryDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||
default:
|
||||
return nil, fmt.Errorf("ent: unknown Logentry mutation op: %q", m.Op())
|
||||
}
|
||||
}
|
||||
|
||||
// ProjectClient is a client for the Project schema.
|
||||
type ProjectClient struct {
|
||||
config
|
||||
}
|
||||
|
||||
// NewProjectClient returns a client for the Project from the given config.
|
||||
func NewProjectClient(c config) *ProjectClient {
|
||||
return &ProjectClient{config: c}
|
||||
}
|
||||
|
||||
// Use adds a list of mutation hooks to the hooks stack.
|
||||
// A call to `Use(f, g, h)` equals to `project.Hooks(f(g(h())))`.
|
||||
func (c *ProjectClient) Use(hooks ...Hook) {
|
||||
c.hooks.Project = append(c.hooks.Project, hooks...)
|
||||
}
|
||||
|
||||
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||
// A call to `Intercept(f, g, h)` equals to `project.Intercept(f(g(h())))`.
|
||||
func (c *ProjectClient) Intercept(interceptors ...Interceptor) {
|
||||
c.inters.Project = append(c.inters.Project, interceptors...)
|
||||
}
|
||||
|
||||
// Create returns a builder for creating a Project entity.
|
||||
func (c *ProjectClient) Create() *ProjectCreate {
|
||||
mutation := newProjectMutation(c.config, OpCreate)
|
||||
return &ProjectCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// CreateBulk returns a builder for creating a bulk of Project entities.
|
||||
func (c *ProjectClient) CreateBulk(builders ...*ProjectCreate) *ProjectCreateBulk {
|
||||
return &ProjectCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *ProjectClient) MapCreateBulk(slice any, setFunc func(*ProjectCreate, int)) *ProjectCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &ProjectCreateBulk{err: fmt.Errorf("calling to ProjectClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*ProjectCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &ProjectCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for Project.
|
||||
func (c *ProjectClient) Update() *ProjectUpdate {
|
||||
mutation := newProjectMutation(c.config, OpUpdate)
|
||||
return &ProjectUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOne returns an update builder for the given entity.
|
||||
func (c *ProjectClient) UpdateOne(pr *Project) *ProjectUpdateOne {
|
||||
mutation := newProjectMutation(c.config, OpUpdateOne, withProject(pr))
|
||||
return &ProjectUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOneID returns an update builder for the given id.
|
||||
func (c *ProjectClient) UpdateOneID(id int) *ProjectUpdateOne {
|
||||
mutation := newProjectMutation(c.config, OpUpdateOne, withProjectID(id))
|
||||
return &ProjectUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// Delete returns a delete builder for Project.
|
||||
func (c *ProjectClient) Delete() *ProjectDelete {
|
||||
mutation := newProjectMutation(c.config, OpDelete)
|
||||
return &ProjectDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// DeleteOne returns a builder for deleting the given entity.
|
||||
func (c *ProjectClient) DeleteOne(pr *Project) *ProjectDeleteOne {
|
||||
return c.DeleteOneID(pr.ID)
|
||||
}
|
||||
|
||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||
func (c *ProjectClient) DeleteOneID(id int) *ProjectDeleteOne {
|
||||
builder := c.Delete().Where(project.ID(id))
|
||||
builder.mutation.id = &id
|
||||
builder.mutation.op = OpDeleteOne
|
||||
return &ProjectDeleteOne{builder}
|
||||
}
|
||||
|
||||
// Query returns a query builder for Project.
|
||||
func (c *ProjectClient) Query() *ProjectQuery {
|
||||
return &ProjectQuery{
|
||||
config: c.config,
|
||||
ctx: &QueryContext{Type: TypeProject},
|
||||
inters: c.Interceptors(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a Project entity by its id.
|
||||
func (c *ProjectClient) Get(ctx context.Context, id int) (*Project, error) {
|
||||
return c.Query().Where(project.ID(id)).Only(ctx)
|
||||
}
|
||||
|
||||
// GetX is like Get, but panics if an error occurs.
|
||||
func (c *ProjectClient) GetX(ctx context.Context, id int) *Project {
|
||||
obj, err := c.Get(ctx, id)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
// QueryLogentries queries the logentries edge of a Project.
|
||||
func (c *ProjectClient) QueryLogentries(pr *Project) *LogentryQuery {
|
||||
query := (&LogentryClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := pr.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(project.Table, project.FieldID, id),
|
||||
sqlgraph.To(logentry.Table, logentry.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, project.LogentriesTable, project.LogentriesColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(pr.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// Hooks returns the client hooks.
|
||||
func (c *ProjectClient) Hooks() []Hook {
|
||||
return c.hooks.Project
|
||||
}
|
||||
|
||||
// Interceptors returns the client interceptors.
|
||||
func (c *ProjectClient) Interceptors() []Interceptor {
|
||||
return c.inters.Project
|
||||
}
|
||||
|
||||
func (c *ProjectClient) mutate(ctx context.Context, m *ProjectMutation) (Value, error) {
|
||||
switch m.Op() {
|
||||
case OpCreate:
|
||||
return (&ProjectCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdate:
|
||||
return (&ProjectUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdateOne:
|
||||
return (&ProjectUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpDelete, OpDeleteOne:
|
||||
return (&ProjectDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||
default:
|
||||
return nil, fmt.Errorf("ent: unknown Project mutation op: %q", m.Op())
|
||||
}
|
||||
}
|
||||
|
||||
// hooks and interceptors per client, for fast access.
|
||||
type (
|
||||
hooks struct {
|
||||
Logentry, Project []ent.Hook
|
||||
}
|
||||
inters struct {
|
||||
Logentry, Project []ent.Interceptor
|
||||
}
|
||||
)
|
ent/ent.go (new file, +610)
@@ -0,0 +1,610 @@
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/logentry"
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
// ent aliases to avoid import conflicts in user's code.
|
||||
type (
|
||||
Op = ent.Op
|
||||
Hook = ent.Hook
|
||||
Value = ent.Value
|
||||
Query = ent.Query
|
||||
QueryContext = ent.QueryContext
|
||||
Querier = ent.Querier
|
||||
QuerierFunc = ent.QuerierFunc
|
||||
Interceptor = ent.Interceptor
|
||||
InterceptFunc = ent.InterceptFunc
|
||||
Traverser = ent.Traverser
|
||||
TraverseFunc = ent.TraverseFunc
|
||||
Policy = ent.Policy
|
||||
Mutator = ent.Mutator
|
||||
Mutation = ent.Mutation
|
||||
MutateFunc = ent.MutateFunc
|
||||
)
|
||||
|
||||
type clientCtxKey struct{}
|
||||
|
||||
// FromContext returns a Client stored inside a context, or nil if there isn't one.
|
||||
func FromContext(ctx context.Context) *Client {
|
||||
c, _ := ctx.Value(clientCtxKey{}).(*Client)
|
||||
return c
|
||||
}
|
||||
|
||||
// NewContext returns a new context with the given Client attached.
|
||||
func NewContext(parent context.Context, c *Client) context.Context {
|
||||
return context.WithValue(parent, clientCtxKey{}, c)
|
||||
}
|
||||
|
||||
type txCtxKey struct{}
|
||||
|
||||
// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
|
||||
func TxFromContext(ctx context.Context) *Tx {
|
||||
tx, _ := ctx.Value(txCtxKey{}).(*Tx)
|
||||
return tx
|
||||
}
|
||||
|
||||
// NewTxContext returns a new context with the given Tx attached.
|
||||
func NewTxContext(parent context.Context, tx *Tx) context.Context {
|
||||
return context.WithValue(parent, txCtxKey{}, tx)
|
||||
}
|
||||
|
||||
// OrderFunc applies an ordering on the sql selector.
|
||||
// Deprecated: Use Asc/Desc functions or the package builders instead.
|
||||
type OrderFunc func(*sql.Selector)
|
||||
|
||||
var (
|
||||
initCheck sync.Once
|
||||
columnCheck sql.ColumnCheck
|
||||
)
|
||||
|
||||
// columnChecker checks if the column exists in the given table.
|
||||
func checkColumn(table, column string) error {
|
||||
initCheck.Do(func() {
|
||||
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
|
||||
logentry.Table: logentry.ValidColumn,
|
||||
project.Table: project.ValidColumn,
|
||||
})
|
||||
})
|
||||
return columnCheck(table, column)
|
||||
}
|
||||
|
||||
// Asc applies the given fields in ASC order.
|
||||
func Asc(fields ...string) func(*sql.Selector) {
|
||||
return func(s *sql.Selector) {
|
||||
for _, f := range fields {
|
||||
if err := checkColumn(s.TableName(), f); err != nil {
|
||||
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
||||
}
|
||||
s.OrderBy(sql.Asc(s.C(f)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Desc applies the given fields in DESC order.
|
||||
func Desc(fields ...string) func(*sql.Selector) {
|
||||
return func(s *sql.Selector) {
|
||||
for _, f := range fields {
|
||||
if err := checkColumn(s.TableName(), f); err != nil {
|
||||
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
||||
}
|
||||
s.OrderBy(sql.Desc(s.C(f)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AggregateFunc applies an aggregation step on the group-by traversal/selector.
|
||||
type AggregateFunc func(*sql.Selector) string
|
||||
|
||||
// As is a pseudo aggregation function for renaming another other functions with custom names. For example:
|
||||
//
|
||||
// GroupBy(field1, field2).
|
||||
// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")).
|
||||
// Scan(ctx, &v)
|
||||
func As(fn AggregateFunc, end string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
return sql.As(fn(s), end)
|
||||
}
|
||||
}
|
||||
|
||||
// Count applies the "count" aggregation function on each group.
|
||||
func Count() AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
return sql.Count("*")
|
||||
}
|
||||
}
|
||||
|
||||
// Max applies the "max" aggregation function on the given field of each group.
|
||||
func Max(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
return sql.Max(s.C(field))
|
||||
}
|
||||
}
|
||||
|
||||
// Mean applies the "mean" aggregation function on the given field of each group.
|
||||
func Mean(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
return sql.Avg(s.C(field))
|
||||
}
|
||||
}
|
||||
|
||||
// Min applies the "min" aggregation function on the given field of each group.
|
||||
func Min(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
return sql.Min(s.C(field))
|
||||
}
|
||||
}
|
||||
|
||||
// Sum applies the "sum" aggregation function on the given field of each group.
|
||||
func Sum(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
return sql.Sum(s.C(field))
|
||||
}
|
||||
}
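// Illustrative sketch (not part of the generated code): the aggregation
// helpers above are passed to a group-by builder; here log entries are
// grouped by content and counted, scanning into an ad-hoc struct.
func exampleCountPerContent(ctx context.Context, c *Client) error {
	var v []struct {
		Content string `json:"content"`
		Count   int    `json:"count"`
	}
	return c.Logentry.Query().
		GroupBy(logentry.FieldContent).
		Aggregate(As(Count(), "count")).
		Scan(ctx, &v)
}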
|
||||
|
||||
// ValidationError is returned when validating a field or edge fails.
|
||||
type ValidationError struct {
|
||||
Name string // Field or edge name.
|
||||
err error
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e *ValidationError) Error() string {
|
||||
return e.err.Error()
|
||||
}
|
||||
|
||||
// Unwrap implements the errors.Wrapper interface.
|
||||
func (e *ValidationError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// IsValidationError returns a boolean indicating whether the error is a validation error.
|
||||
func IsValidationError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var e *ValidationError
|
||||
return errors.As(err, &e)
|
||||
}
|
||||
|
||||
// NotFoundError is returned when trying to fetch a specific entity that was not found in the database.
|
||||
type NotFoundError struct {
|
||||
label string
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e *NotFoundError) Error() string {
|
||||
return "ent: " + e.label + " not found"
|
||||
}
|
||||
|
||||
// IsNotFound returns a boolean indicating whether the error is a not found error.
|
||||
func IsNotFound(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var e *NotFoundError
|
||||
return errors.As(err, &e)
|
||||
}
|
||||
|
||||
// MaskNotFound masks not found error.
|
||||
func MaskNotFound(err error) error {
|
||||
if IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NotSingularError is returned when trying to fetch a singular entity and more than one was found in the database.
|
||||
type NotSingularError struct {
|
||||
label string
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e *NotSingularError) Error() string {
|
||||
return "ent: " + e.label + " not singular"
|
||||
}
|
||||
|
||||
// IsNotSingular returns a boolean indicating whether the error is a not singular error.
|
||||
func IsNotSingular(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var e *NotSingularError
|
||||
return errors.As(err, &e)
|
||||
}
|
||||
|
||||
// NotLoadedError is returned when trying to get a node that was not loaded by the query.
|
||||
type NotLoadedError struct {
|
||||
edge string
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e *NotLoadedError) Error() string {
|
||||
return "ent: " + e.edge + " edge was not loaded"
|
||||
}
|
||||
|
||||
// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
|
||||
func IsNotLoaded(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var e *NotLoadedError
|
||||
return errors.As(err, &e)
|
||||
}
|
||||
|
||||
// ConstraintError is returned when trying to create/update one or more entities and
|
||||
// one or more of their constraints failed. For example, violation of edge or
|
||||
// field uniqueness.
|
||||
type ConstraintError struct {
|
||||
msg string
|
||||
wrap error
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e ConstraintError) Error() string {
|
||||
return "ent: constraint failed: " + e.msg
|
||||
}
|
||||
|
||||
// Unwrap implements the errors.Wrapper interface.
|
||||
func (e *ConstraintError) Unwrap() error {
|
||||
return e.wrap
|
||||
}
|
||||
|
||||
// IsConstraintError returns a boolean indicating whether the error is a constraint failure.
|
||||
func IsConstraintError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var e *ConstraintError
|
||||
return errors.As(err, &e)
|
||||
}
|
||||
|
||||
// selector embedded by the different Select/GroupBy builders.
|
||||
type selector struct {
|
||||
label string
|
||||
flds *[]string
|
||||
fns []AggregateFunc
|
||||
scan func(context.Context, any) error
|
||||
}
|
||||
|
||||
// ScanX is like Scan, but panics if an error occurs.
|
||||
func (s *selector) ScanX(ctx context.Context, v any) {
|
||||
if err := s.scan(ctx, v); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Strings returns a list of strings from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Strings(ctx context.Context) ([]string, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []string
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// StringsX is like Strings, but panics if an error occurs.
|
||||
func (s *selector) StringsX(ctx context.Context) []string {
|
||||
v, err := s.Strings(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// String returns a single string from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) String(ctx context.Context) (_ string, err error) {
|
||||
var v []string
|
||||
if v, err = s.Strings(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// StringX is like String, but panics if an error occurs.
|
||||
func (s *selector) StringX(ctx context.Context) string {
|
||||
v, err := s.String(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Ints returns a list of ints from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Ints(ctx context.Context) ([]int, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []int
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// IntsX is like Ints, but panics if an error occurs.
|
||||
func (s *selector) IntsX(ctx context.Context) []int {
|
||||
v, err := s.Ints(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Int returns a single int from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Int(ctx context.Context) (_ int, err error) {
|
||||
var v []int
|
||||
if v, err = s.Ints(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// IntX is like Int, but panics if an error occurs.
|
||||
func (s *selector) IntX(ctx context.Context) int {
|
||||
v, err := s.Int(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Float64s returns a list of float64s from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Float64s(ctx context.Context) ([]float64, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []float64
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// Float64sX is like Float64s, but panics if an error occurs.
|
||||
func (s *selector) Float64sX(ctx context.Context) []float64 {
|
||||
v, err := s.Float64s(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Float64 returns a single float64 from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Float64(ctx context.Context) (_ float64, err error) {
|
||||
var v []float64
|
||||
if v, err = s.Float64s(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Float64X is like Float64, but panics if an error occurs.
|
||||
func (s *selector) Float64X(ctx context.Context) float64 {
|
||||
v, err := s.Float64(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Bools returns a list of bools from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Bools(ctx context.Context) ([]bool, error) {
|
||||
if len(*s.flds) > 1 {
|
||||
return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field")
|
||||
}
|
||||
var v []bool
|
||||
if err := s.scan(ctx, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// BoolsX is like Bools, but panics if an error occurs.
|
||||
func (s *selector) BoolsX(ctx context.Context) []bool {
|
||||
v, err := s.Bools(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Bool returns a single bool from a selector. It is only allowed when selecting one field.
|
||||
func (s *selector) Bool(ctx context.Context) (_ bool, err error) {
|
||||
var v []bool
|
||||
if v, err = s.Bools(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(v) {
|
||||
case 1:
|
||||
return v[0], nil
|
||||
case 0:
|
||||
err = &NotFoundError{s.label}
|
||||
default:
|
||||
err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// BoolX is like Bool, but panics if an error occurs.
|
||||
func (s *selector) BoolX(ctx context.Context) bool {
|
||||
v, err := s.Bool(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// withHooks invokes the builder operation with the given hooks, if any.
|
||||
func withHooks[V Value, M any, PM interface {
|
||||
*M
|
||||
Mutation
|
||||
}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
|
||||
if len(hooks) == 0 {
|
||||
return exec(ctx)
|
||||
}
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutationT, ok := any(m).(PM)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
// Set the mutation to the builder.
|
||||
*mutation = *mutationT
|
||||
return exec(ctx)
|
||||
})
|
||||
for i := len(hooks) - 1; i >= 0; i-- {
|
||||
if hooks[i] == nil {
|
||||
return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||
}
|
||||
mut = hooks[i](mut)
|
||||
}
|
||||
v, err := mut.Mutate(ctx, mutation)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
nv, ok := v.(V)
|
||||
if !ok {
|
||||
return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
|
||||
}
|
||||
return nv, nil
|
||||
}
|
||||
|
||||
// setContextOp returns a new context with the given QueryContext (including its op) attached, unless one is already attached.
|
||||
func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
|
||||
if ent.QueryFromContext(ctx) == nil {
|
||||
qc.Op = op
|
||||
ctx = ent.NewQueryContext(ctx, qc)
|
||||
}
|
||||
return ctx
|
||||
}
|
||||
|
||||
func querierAll[V Value, Q interface {
|
||||
sqlAll(context.Context, ...queryHook) (V, error)
|
||||
}]() Querier {
|
||||
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||
query, ok := q.(Q)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||
}
|
||||
return query.sqlAll(ctx)
|
||||
})
|
||||
}
|
||||
|
||||
func querierCount[Q interface {
|
||||
sqlCount(context.Context) (int, error)
|
||||
}]() Querier {
|
||||
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||
query, ok := q.(Q)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||
}
|
||||
return query.sqlCount(ctx)
|
||||
})
|
||||
}
|
||||
|
||||
func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
|
||||
for i := len(inters) - 1; i >= 0; i-- {
|
||||
qr = inters[i].Intercept(qr)
|
||||
}
|
||||
rv, err := qr.Query(ctx, q)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
vt, ok := rv.(V)
|
||||
if !ok {
|
||||
return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v)
|
||||
}
|
||||
return vt, nil
|
||||
}
|
||||
|
||||
func scanWithInterceptors[Q1 ent.Query, Q2 interface {
|
||||
sqlScan(context.Context, Q1, any) error
|
||||
}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
|
||||
rv := reflect.ValueOf(v)
|
||||
var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||
query, ok := q.(Q1)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||
}
|
||||
if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
|
||||
return rv.Elem().Interface(), nil
|
||||
}
|
||||
return v, nil
|
||||
})
|
||||
for i := len(inters) - 1; i >= 0; i-- {
|
||||
qr = inters[i].Intercept(qr)
|
||||
}
|
||||
vv, err := qr.Query(ctx, rootQuery)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch rv2 := reflect.ValueOf(vv); {
|
||||
case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
|
||||
case rv.Type() == rv2.Type():
|
||||
rv.Elem().Set(rv2.Elem())
|
||||
case rv.Elem().Type() == rv2.Type():
|
||||
rv.Elem().Set(rv2)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// queryHook describes an internal hook for the different sqlAll methods.
|
||||
type queryHook func(context.Context, *sqlgraph.QuerySpec)
|
84
ent/enttest/enttest.go
Normal file
@ -0,0 +1,84 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package enttest
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent"
|
||||
// required by schema hooks.
|
||||
_ "code.icod.de/dalu/gomanager/ent/runtime"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/migrate"
|
||||
"entgo.io/ent/dialect/sql/schema"
|
||||
)
|
||||
|
||||
type (
|
||||
// TestingT is the interface that is shared between
|
||||
// testing.T and testing.B and used by enttest.
|
||||
TestingT interface {
|
||||
FailNow()
|
||||
Error(...any)
|
||||
}
|
||||
|
||||
// Option configures client creation.
|
||||
Option func(*options)
|
||||
|
||||
options struct {
|
||||
opts []ent.Option
|
||||
migrateOpts []schema.MigrateOption
|
||||
}
|
||||
)
|
||||
|
||||
// WithOptions forwards options to client creation.
|
||||
func WithOptions(opts ...ent.Option) Option {
|
||||
return func(o *options) {
|
||||
o.opts = append(o.opts, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithMigrateOptions forwards options to auto migration.
|
||||
func WithMigrateOptions(opts ...schema.MigrateOption) Option {
|
||||
return func(o *options) {
|
||||
o.migrateOpts = append(o.migrateOpts, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
func newOptions(opts []Option) *options {
|
||||
o := &options{}
|
||||
for _, opt := range opts {
|
||||
opt(o)
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// Open calls ent.Open and runs the auto migration.
|
||||
func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client {
|
||||
o := newOptions(opts)
|
||||
c, err := ent.Open(driverName, dataSourceName, o.opts...)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
migrateSchema(t, c, o)
|
||||
return c
|
||||
}
|
||||
|
||||
// NewClient calls ent.NewClient and runs the auto migration.
|
||||
func NewClient(t TestingT, opts ...Option) *ent.Client {
|
||||
o := newOptions(opts)
|
||||
c := ent.NewClient(o.opts...)
|
||||
migrateSchema(t, c, o)
|
||||
return c
|
||||
}
|
||||
func migrateSchema(t TestingT, c *ent.Client, o *options) {
|
||||
tables, err := schema.CopyTables(migrate.Tables)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
}
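// Illustrative usage sketch (assumes a SQLite driver is linked into the test
// binary; the mattn/go-sqlite3 import and DSN below are assumptions taken
// from common ent examples, not from this repository):
//
//	package gomanager_test
//
//	import (
//		"testing"
//
//		"code.icod.de/dalu/gomanager/ent/enttest"
//		_ "github.com/mattn/go-sqlite3"
//	)
//
//	func TestClient(t *testing.T) {
//		client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
//		defer client.Close()
//		// ... exercise client ...
//	}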
|
3
ent/generate.go
Normal file
@ -0,0 +1,3 @@
|
||||
package ent
|
||||
|
||||
//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema
|
211
ent/hook/hook.go
Normal file
@ -0,0 +1,211 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package hook
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent"
|
||||
)
|
||||
|
||||
// The LogentryFunc type is an adapter to allow the use of an ordinary
|
||||
// function as a Logentry mutator.
|
||||
type LogentryFunc func(context.Context, *ent.LogentryMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f LogentryFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.LogentryMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.LogentryMutation", m)
|
||||
}
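// Illustrative sketch (not part of the generated code): a hook built on
// LogentryFunc that runs custom logic before delegating to the next mutator
// in the chain.
func exampleLogentryHook() ent.Hook {
	return func(next ent.Mutator) ent.Mutator {
		return LogentryFunc(func(ctx context.Context, m *ent.LogentryMutation) (ent.Value, error) {
			// Inspect or adjust the mutation here (e.g. stamp a date field).
			return next.Mutate(ctx, m)
		})
	}
}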
|
||||
|
||||
// The ProjectFunc type is an adapter to allow the use of an ordinary
|
||||
// function as a Project mutator.
|
||||
type ProjectFunc func(context.Context, *ent.ProjectMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f ProjectFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.ProjectMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ProjectMutation", m)
|
||||
}
|
||||
|
||||
// Condition is a hook condition function.
|
||||
type Condition func(context.Context, ent.Mutation) bool
|
||||
|
||||
// And groups conditions with the AND operator.
|
||||
func And(first, second Condition, rest ...Condition) Condition {
|
||||
return func(ctx context.Context, m ent.Mutation) bool {
|
||||
if !first(ctx, m) || !second(ctx, m) {
|
||||
return false
|
||||
}
|
||||
for _, cond := range rest {
|
||||
if !cond(ctx, m) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Or groups conditions with the OR operator.
|
||||
func Or(first, second Condition, rest ...Condition) Condition {
|
||||
return func(ctx context.Context, m ent.Mutation) bool {
|
||||
if first(ctx, m) || second(ctx, m) {
|
||||
return true
|
||||
}
|
||||
for _, cond := range rest {
|
||||
if cond(ctx, m) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Not negates a given condition.
|
||||
func Not(cond Condition) Condition {
|
||||
return func(ctx context.Context, m ent.Mutation) bool {
|
||||
return !cond(ctx, m)
|
||||
}
|
||||
}
|
||||
|
||||
// HasOp is a condition testing mutation operation.
|
||||
func HasOp(op ent.Op) Condition {
|
||||
return func(_ context.Context, m ent.Mutation) bool {
|
||||
return m.Op().Is(op)
|
||||
}
|
||||
}
|
||||
|
||||
// HasAddedFields is a condition validating `.AddedField` on fields.
|
||||
func HasAddedFields(field string, fields ...string) Condition {
|
||||
return func(_ context.Context, m ent.Mutation) bool {
|
||||
if _, exists := m.AddedField(field); !exists {
|
||||
return false
|
||||
}
|
||||
for _, field := range fields {
|
||||
if _, exists := m.AddedField(field); !exists {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// HasClearedFields is a condition validating `.FieldCleared` on fields.
|
||||
func HasClearedFields(field string, fields ...string) Condition {
|
||||
return func(_ context.Context, m ent.Mutation) bool {
|
||||
if exists := m.FieldCleared(field); !exists {
|
||||
return false
|
||||
}
|
||||
for _, field := range fields {
|
||||
if exists := m.FieldCleared(field); !exists {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// HasFields is a condition validating `.Field` on fields.
|
||||
func HasFields(field string, fields ...string) Condition {
|
||||
return func(_ context.Context, m ent.Mutation) bool {
|
||||
if _, exists := m.Field(field); !exists {
|
||||
return false
|
||||
}
|
||||
for _, field := range fields {
|
||||
if _, exists := m.Field(field); !exists {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// If executes the given hook under the specified condition.
|
||||
//
|
||||
// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...)))
|
||||
func If(hk ent.Hook, cond Condition) ent.Hook {
|
||||
return func(next ent.Mutator) ent.Mutator {
|
||||
return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if cond(ctx, m) {
|
||||
return hk(next).Mutate(ctx, m)
|
||||
}
|
||||
return next.Mutate(ctx, m)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// On executes the given hook only for the given operation.
|
||||
//
|
||||
// hook.On(Log, ent.Delete|ent.Create)
|
||||
func On(hk ent.Hook, op ent.Op) ent.Hook {
|
||||
return If(hk, HasOp(op))
|
||||
}
|
||||
|
||||
// Unless skips the given hook only for the given operation.
|
||||
//
|
||||
// hook.Unless(Log, ent.Update|ent.UpdateOne)
|
||||
func Unless(hk ent.Hook, op ent.Op) ent.Hook {
|
||||
return If(hk, Not(HasOp(op)))
|
||||
}
|
||||
|
||||
// FixedError is a hook returning a fixed error.
|
||||
func FixedError(err error) ent.Hook {
|
||||
return func(ent.Mutator) ent.Mutator {
|
||||
return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) {
|
||||
return nil, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Reject returns a hook that rejects all operations that match op.
|
||||
//
|
||||
// func (T) Hooks() []ent.Hook {
|
||||
// return []ent.Hook{
|
||||
// Reject(ent.Delete|ent.Update),
|
||||
// }
|
||||
// }
|
||||
func Reject(op ent.Op) ent.Hook {
|
||||
hk := FixedError(fmt.Errorf("%s operation is not allowed", op))
|
||||
return On(hk, op)
|
||||
}
|
||||
|
||||
// Chain acts as a list of hooks and is effectively immutable.
|
||||
// Once created, it will always hold the same set of hooks in the same order.
|
||||
type Chain struct {
|
||||
hooks []ent.Hook
|
||||
}
|
||||
|
||||
// NewChain creates a new chain of hooks.
|
||||
func NewChain(hooks ...ent.Hook) Chain {
|
||||
return Chain{append([]ent.Hook(nil), hooks...)}
|
||||
}
|
||||
|
||||
// Hook chains the list of hooks and returns the final hook.
|
||||
func (c Chain) Hook() ent.Hook {
|
||||
return func(mutator ent.Mutator) ent.Mutator {
|
||||
for i := len(c.hooks) - 1; i >= 0; i-- {
|
||||
mutator = c.hooks[i](mutator)
|
||||
}
|
||||
return mutator
|
||||
}
|
||||
}
|
||||
|
||||
// Append extends a chain, adding the specified hooks
|
||||
// as the last ones in the mutation flow.
|
||||
func (c Chain) Append(hooks ...ent.Hook) Chain {
|
||||
newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks))
|
||||
newHooks = append(newHooks, c.hooks...)
|
||||
newHooks = append(newHooks, hooks...)
|
||||
return Chain{newHooks}
|
||||
}
|
||||
|
||||
// Extend extends a chain, adding the hooks of the specified chain
|
||||
// as the last ones in the mutation flow.
|
||||
func (c Chain) Extend(chain Chain) Chain {
|
||||
return c.Append(chain.hooks...)
|
||||
}
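// Illustrative sketch (not part of the generated code): composing the
// helpers above into a single hook, e.g. to reject deletes and log the
// remaining write operations (logHook is a hypothetical caller-supplied hook).
func exampleChain(logHook ent.Hook) ent.Hook {
	return NewChain(
		Reject(ent.OpDelete|ent.OpDeleteOne),
		On(logHook, ent.OpCreate|ent.OpUpdate|ent.OpUpdateOne),
	).Hook()
}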
|
158
ent/logentry.go
Normal file
@ -0,0 +1,158 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/logentry"
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
)
|
||||
|
||||
// Logentry is the model entity for the Logentry schema.
|
||||
type Logentry struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int `json:"id,omitempty"`
|
||||
// Date holds the value of the "date" field.
|
||||
Date time.Time `json:"date,omitempty"`
|
||||
// Content holds the value of the "content" field.
|
||||
Content string `json:"content,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the LogentryQuery when eager-loading is set.
|
||||
Edges LogentryEdges `json:"edges"`
|
||||
project_logentries *int
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// LogentryEdges holds the relations/edges for other nodes in the graph.
|
||||
type LogentryEdges struct {
|
||||
// Project holds the value of the project edge.
|
||||
Project *Project `json:"project,omitempty"`
|
||||
// loadedTypes holds the information for reporting if a
|
||||
// type was loaded (or requested) in eager-loading or not.
|
||||
loadedTypes [1]bool
|
||||
}
|
||||
|
||||
// ProjectOrErr returns the Project value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e LogentryEdges) ProjectOrErr() (*Project, error) {
|
||||
if e.loadedTypes[0] {
|
||||
if e.Project == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: project.Label}
|
||||
}
|
||||
return e.Project, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "project"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*Logentry) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case logentry.FieldID:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case logentry.FieldContent:
|
||||
values[i] = new(sql.NullString)
|
||||
case logentry.FieldDate:
|
||||
values[i] = new(sql.NullTime)
|
||||
case logentry.ForeignKeys[0]: // project_logentries
|
||||
values[i] = new(sql.NullInt64)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the Logentry fields.
|
||||
func (l *Logentry) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case logentry.FieldID:
|
||||
value, ok := values[i].(*sql.NullInt64)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected type %T for field id", value)
|
||||
}
|
||||
l.ID = int(value.Int64)
|
||||
case logentry.FieldDate:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field date", values[i])
|
||||
} else if value.Valid {
|
||||
l.Date = value.Time
|
||||
}
|
||||
case logentry.FieldContent:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field content", values[i])
|
||||
} else if value.Valid {
|
||||
l.Content = value.String
|
||||
}
|
||||
case logentry.ForeignKeys[0]:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for edge-field project_logentries", value)
|
||||
} else if value.Valid {
|
||||
l.project_logentries = new(int)
|
||||
*l.project_logentries = int(value.Int64)
|
||||
}
|
||||
default:
|
||||
l.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the Logentry.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (l *Logentry) Value(name string) (ent.Value, error) {
|
||||
return l.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryProject queries the "project" edge of the Logentry entity.
|
||||
func (l *Logentry) QueryProject() *ProjectQuery {
|
||||
return NewLogentryClient(l.config).QueryProject(l)
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this Logentry.
|
||||
// Note that you need to call Logentry.Unwrap() before calling this method if this Logentry
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (l *Logentry) Update() *LogentryUpdateOne {
|
||||
return NewLogentryClient(l.config).UpdateOne(l)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the Logentry entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (l *Logentry) Unwrap() *Logentry {
|
||||
_tx, ok := l.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: Logentry is not a transactional entity")
|
||||
}
|
||||
l.config.driver = _tx.drv
|
||||
return l
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (l *Logentry) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("Logentry(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", l.ID))
|
||||
builder.WriteString("date=")
|
||||
builder.WriteString(l.Date.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("content=")
|
||||
builder.WriteString(l.Content)
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// Logentries is a parsable slice of Logentry.
|
||||
type Logentries []*Logentry
|
97
ent/logentry/logentry.go
Normal file
@ -0,0 +1,97 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package logentry
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
|
||||
// Label holds the string label denoting the logentry type in the database.
|
||||
Label = "logentry"
|
||||
// FieldID holds the string denoting the id field in the database.
|
||||
FieldID = "id"
|
||||
// FieldDate holds the string denoting the date field in the database.
|
||||
FieldDate = "date"
|
||||
// FieldContent holds the string denoting the content field in the database.
|
||||
FieldContent = "content"
|
||||
// EdgeProject holds the string denoting the project edge name in mutations.
|
||||
EdgeProject = "project"
|
||||
// Table holds the table name of the logentry in the database.
|
||||
Table = "logentries"
|
||||
// ProjectTable is the table that holds the project relation/edge.
|
||||
ProjectTable = "logentries"
|
||||
// ProjectInverseTable is the table name for the Project entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "project" package.
|
||||
ProjectInverseTable = "projects"
|
||||
// ProjectColumn is the table column denoting the project relation/edge.
|
||||
ProjectColumn = "project_logentries"
|
||||
)
|
||||
|
||||
// Columns holds all SQL columns for logentry fields.
|
||||
var Columns = []string{
|
||||
FieldID,
|
||||
FieldDate,
|
||||
FieldContent,
|
||||
}
|
||||
|
||||
// ForeignKeys holds the SQL foreign-keys that are owned by the "logentries"
|
||||
// table and are not defined as standalone fields in the schema.
|
||||
var ForeignKeys = []string{
|
||||
"project_logentries",
|
||||
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for i := range ForeignKeys {
|
||||
if column == ForeignKeys[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultDate holds the default value on creation for the "date" field.
|
||||
DefaultDate func() time.Time
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the Logentry queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByDate orders the results by the date field.
|
||||
func ByDate(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldDate, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByContent orders the results by the content field.
|
||||
func ByContent(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldContent, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByProjectField orders the results by project field.
|
||||
func ByProjectField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newProjectStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
func newProjectStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(ProjectInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, ProjectTable, ProjectColumn),
|
||||
)
|
||||
}
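// Illustrative usage sketch (not part of the generated code; the project
// "name" field is an assumption about the Project schema): OrderOption
// values from this package are passed to LogentryQuery.Order, e.g.
//
//	client.Logentry.Query().
//		Order(
//			logentry.ByDate(sql.OrderDesc()),
//			logentry.ByProjectField("name"),
//		).
//		All(ctx)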
|
209
ent/logentry/where.go
Normal file
@ -0,0 +1,209 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package logentry
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/predicate"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
// ID filters vertices based on their ID field.
|
||||
func ID(id int) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDEQ applies the EQ predicate on the ID field.
|
||||
func IDEQ(id int) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDNEQ applies the NEQ predicate on the ID field.
|
||||
func IDNEQ(id int) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldNEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDIn applies the In predicate on the ID field.
|
||||
func IDIn(ids ...int) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDNotIn applies the NotIn predicate on the ID field.
|
||||
func IDNotIn(ids ...int) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldNotIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDGT applies the GT predicate on the ID field.
|
||||
func IDGT(id int) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldGT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDGTE applies the GTE predicate on the ID field.
|
||||
func IDGTE(id int) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldGTE(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLT applies the LT predicate on the ID field.
|
||||
func IDLT(id int) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldLT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLTE applies the LTE predicate on the ID field.
|
||||
func IDLTE(id int) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldLTE(FieldID, id))
|
||||
}
|
||||
|
||||
// Date applies equality check predicate on the "date" field. It's identical to DateEQ.
|
||||
func Date(v time.Time) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldEQ(FieldDate, v))
|
||||
}
|
||||
|
||||
// Content applies equality check predicate on the "content" field. It's identical to ContentEQ.
|
||||
func Content(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldEQ(FieldContent, v))
|
||||
}
|
||||
|
||||
// DateEQ applies the EQ predicate on the "date" field.
|
||||
func DateEQ(v time.Time) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldEQ(FieldDate, v))
|
||||
}
|
||||
|
||||
// DateNEQ applies the NEQ predicate on the "date" field.
|
||||
func DateNEQ(v time.Time) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldNEQ(FieldDate, v))
|
||||
}
|
||||
|
||||
// DateIn applies the In predicate on the "date" field.
|
||||
func DateIn(vs ...time.Time) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldIn(FieldDate, vs...))
|
||||
}
|
||||
|
||||
// DateNotIn applies the NotIn predicate on the "date" field.
|
||||
func DateNotIn(vs ...time.Time) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldNotIn(FieldDate, vs...))
|
||||
}
|
||||
|
||||
// DateGT applies the GT predicate on the "date" field.
|
||||
func DateGT(v time.Time) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldGT(FieldDate, v))
|
||||
}
|
||||
|
||||
// DateGTE applies the GTE predicate on the "date" field.
|
||||
func DateGTE(v time.Time) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldGTE(FieldDate, v))
|
||||
}
|
||||
|
||||
// DateLT applies the LT predicate on the "date" field.
|
||||
func DateLT(v time.Time) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldLT(FieldDate, v))
|
||||
}
|
||||
|
||||
// DateLTE applies the LTE predicate on the "date" field.
|
||||
func DateLTE(v time.Time) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldLTE(FieldDate, v))
|
||||
}
|
||||
|
||||
// ContentEQ applies the EQ predicate on the "content" field.
|
||||
func ContentEQ(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldEQ(FieldContent, v))
|
||||
}
|
||||
|
||||
// ContentNEQ applies the NEQ predicate on the "content" field.
|
||||
func ContentNEQ(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldNEQ(FieldContent, v))
|
||||
}
|
||||
|
||||
// ContentIn applies the In predicate on the "content" field.
|
||||
func ContentIn(vs ...string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldIn(FieldContent, vs...))
|
||||
}
|
||||
|
||||
// ContentNotIn applies the NotIn predicate on the "content" field.
|
||||
func ContentNotIn(vs ...string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldNotIn(FieldContent, vs...))
|
||||
}
|
||||
|
||||
// ContentGT applies the GT predicate on the "content" field.
|
||||
func ContentGT(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldGT(FieldContent, v))
|
||||
}
|
||||
|
||||
// ContentGTE applies the GTE predicate on the "content" field.
|
||||
func ContentGTE(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldGTE(FieldContent, v))
|
||||
}
|
||||
|
||||
// ContentLT applies the LT predicate on the "content" field.
|
||||
func ContentLT(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldLT(FieldContent, v))
|
||||
}
|
||||
|
||||
// ContentLTE applies the LTE predicate on the "content" field.
|
||||
func ContentLTE(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldLTE(FieldContent, v))
|
||||
}
|
||||
|
||||
// ContentContains applies the Contains predicate on the "content" field.
|
||||
func ContentContains(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldContains(FieldContent, v))
|
||||
}
|
||||
|
||||
// ContentHasPrefix applies the HasPrefix predicate on the "content" field.
|
||||
func ContentHasPrefix(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldHasPrefix(FieldContent, v))
|
||||
}
|
||||
|
||||
// ContentHasSuffix applies the HasSuffix predicate on the "content" field.
|
||||
func ContentHasSuffix(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldHasSuffix(FieldContent, v))
|
||||
}
|
||||
|
||||
// ContentEqualFold applies the EqualFold predicate on the "content" field.
|
||||
func ContentEqualFold(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldEqualFold(FieldContent, v))
|
||||
}
|
||||
|
||||
// ContentContainsFold applies the ContainsFold predicate on the "content" field.
|
||||
func ContentContainsFold(v string) predicate.Logentry {
|
||||
return predicate.Logentry(sql.FieldContainsFold(FieldContent, v))
|
||||
}
|
||||
|
||||
// HasProject applies the HasEdge predicate on the "project" edge.
|
||||
func HasProject() predicate.Logentry {
|
||||
return predicate.Logentry(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, ProjectTable, ProjectColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasProjectWith applies the HasEdge predicate on the "project" edge with a given conditions (other predicates).
|
||||
func HasProjectWith(preds ...predicate.Project) predicate.Logentry {
|
||||
return predicate.Logentry(func(s *sql.Selector) {
|
||||
step := newProjectStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.Logentry) predicate.Logentry {
|
||||
return predicate.Logentry(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.Logentry) predicate.Logentry {
|
||||
return predicate.Logentry(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.Logentry) predicate.Logentry {
|
||||
return predicate.Logentry(sql.NotPredicates(p))
|
||||
}
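// Illustrative usage sketch (not part of the generated code): predicates
// compose with And/Or/Not and with the edge predicates above, e.g.
//
//	client.Logentry.Query().
//		Where(
//			logentry.And(
//				logentry.ContentContainsFold("deploy"),
//				logentry.HasProject(),
//			),
//		).
//		All(ctx)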
|
264
ent/logentry_create.go
Normal file
@ -0,0 +1,264 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/logentry"
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
)
|
||||
|
||||
// LogentryCreate is the builder for creating a Logentry entity.
|
||||
type LogentryCreate struct {
|
||||
config
|
||||
mutation *LogentryMutation
|
||||
hooks []Hook
|
||||
}
|
||||
|
||||
// SetDate sets the "date" field.
|
||||
func (lc *LogentryCreate) SetDate(t time.Time) *LogentryCreate {
|
||||
lc.mutation.SetDate(t)
|
||||
return lc
|
||||
}
|
||||
|
||||
// SetNillableDate sets the "date" field if the given value is not nil.
|
||||
func (lc *LogentryCreate) SetNillableDate(t *time.Time) *LogentryCreate {
|
||||
if t != nil {
|
||||
lc.SetDate(*t)
|
||||
}
|
||||
return lc
|
||||
}
|
||||
|
||||
// SetContent sets the "content" field.
|
||||
func (lc *LogentryCreate) SetContent(s string) *LogentryCreate {
|
||||
lc.mutation.SetContent(s)
|
||||
return lc
|
||||
}
|
||||
|
||||
// SetID sets the "id" field.
|
||||
func (lc *LogentryCreate) SetID(i int) *LogentryCreate {
|
||||
lc.mutation.SetID(i)
|
||||
return lc
|
||||
}
|
||||
|
||||
// SetProjectID sets the "project" edge to the Project entity by ID.
|
||||
func (lc *LogentryCreate) SetProjectID(id int) *LogentryCreate {
|
||||
lc.mutation.SetProjectID(id)
|
||||
return lc
|
||||
}
|
||||
|
||||
// SetNillableProjectID sets the "project" edge to the Project entity by ID if the given value is not nil.
|
||||
func (lc *LogentryCreate) SetNillableProjectID(id *int) *LogentryCreate {
|
||||
if id != nil {
|
||||
lc = lc.SetProjectID(*id)
|
||||
}
|
||||
return lc
|
||||
}
|
||||
|
||||
// SetProject sets the "project" edge to the Project entity.
|
||||
func (lc *LogentryCreate) SetProject(p *Project) *LogentryCreate {
|
||||
return lc.SetProjectID(p.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the LogentryMutation object of the builder.
|
||||
func (lc *LogentryCreate) Mutation() *LogentryMutation {
|
||||
return lc.mutation
|
||||
}
|
||||
|
||||
// Save creates the Logentry in the database.
|
||||
func (lc *LogentryCreate) Save(ctx context.Context) (*Logentry, error) {
|
||||
lc.defaults()
|
||||
return withHooks(ctx, lc.sqlSave, lc.mutation, lc.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (lc *LogentryCreate) SaveX(ctx context.Context) *Logentry {
|
||||
v, err := lc.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (lc *LogentryCreate) Exec(ctx context.Context) error {
|
||||
_, err := lc.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (lc *LogentryCreate) ExecX(ctx context.Context) {
|
||||
if err := lc.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
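// exampleCreateLogentry is an illustrative sketch (not part of the generated
// code; it assumes the generated Client exposes a Logentry client field):
// the builder wires the required "content" field and the optional project edge.
func exampleCreateLogentry(ctx context.Context, c *Client, p *Project) (*Logentry, error) {
	return c.Logentry.Create().
		SetContent("initial import").
		SetProject(p).
		Save(ctx)
}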
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (lc *LogentryCreate) defaults() {
|
||||
if _, ok := lc.mutation.Date(); !ok {
|
||||
v := logentry.DefaultDate()
|
||||
lc.mutation.SetDate(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (lc *LogentryCreate) check() error {
|
||||
if _, ok := lc.mutation.Date(); !ok {
|
||||
return &ValidationError{Name: "date", err: errors.New(`ent: missing required field "Logentry.date"`)}
|
||||
}
|
||||
if _, ok := lc.mutation.Content(); !ok {
|
||||
return &ValidationError{Name: "content", err: errors.New(`ent: missing required field "Logentry.content"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lc *LogentryCreate) sqlSave(ctx context.Context) (*Logentry, error) {
|
||||
if err := lc.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := lc.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, lc.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if _spec.ID.Value != _node.ID {
|
||||
id := _spec.ID.Value.(int64)
|
||||
_node.ID = int(id)
|
||||
}
|
||||
lc.mutation.id = &_node.ID
|
||||
lc.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (lc *LogentryCreate) createSpec() (*Logentry, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &Logentry{config: lc.config}
|
||||
_spec = sqlgraph.NewCreateSpec(logentry.Table, sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt))
|
||||
)
|
||||
if id, ok := lc.mutation.ID(); ok {
|
||||
_node.ID = id
|
||||
_spec.ID.Value = id
|
||||
}
|
||||
if value, ok := lc.mutation.Date(); ok {
|
||||
_spec.SetField(logentry.FieldDate, field.TypeTime, value)
|
||||
_node.Date = value
|
||||
}
|
||||
if value, ok := lc.mutation.Content(); ok {
|
||||
_spec.SetField(logentry.FieldContent, field.TypeString, value)
|
||||
_node.Content = value
|
||||
}
|
||||
if nodes := lc.mutation.ProjectIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: logentry.ProjectTable,
|
||||
Columns: []string{logentry.ProjectColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(project.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_node.project_logentries = &nodes[0]
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// LogentryCreateBulk is the builder for creating many Logentry entities in bulk.
|
||||
type LogentryCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*LogentryCreate
|
||||
}
|
||||
|
||||
// Save creates the Logentry entities in the database.
|
||||
func (lcb *LogentryCreateBulk) Save(ctx context.Context) ([]*Logentry, error) {
|
||||
if lcb.err != nil {
|
||||
return nil, lcb.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(lcb.builders))
|
||||
nodes := make([]*Logentry, len(lcb.builders))
|
||||
mutators := make([]Mutator, len(lcb.builders))
|
||||
for i := range lcb.builders {
|
||||
func(i int, root context.Context) {
|
||||
builder := lcb.builders[i]
|
||||
builder.defaults()
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutation, ok := m.(*LogentryMutation)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
if err := builder.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, lcb.builders[i+1].mutation)
|
||||
} else {
|
||||
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||
// Invoke the actual operation on the latest mutation in the chain.
|
||||
if err = sqlgraph.BatchCreate(ctx, lcb.driver, spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &nodes[i].ID
|
||||
if specs[i].ID.Value != nil && nodes[i].ID == 0 {
|
||||
id := specs[i].ID.Value.(int64)
|
||||
nodes[i].ID = int(id)
|
||||
}
|
||||
mutation.done = true
|
||||
return nodes[i], nil
|
||||
})
|
||||
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||
mut = builder.hooks[i](mut)
|
||||
}
|
||||
mutators[i] = mut
|
||||
}(i, ctx)
|
||||
}
|
||||
if len(mutators) > 0 {
|
||||
if _, err := mutators[0].Mutate(ctx, lcb.builders[0].mutation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (lcb *LogentryCreateBulk) SaveX(ctx context.Context) []*Logentry {
|
||||
v, err := lcb.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (lcb *LogentryCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := lcb.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (lcb *LogentryCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := lcb.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
88
ent/logentry_delete.go
Normal file
@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/logentry"
|
||||
"code.icod.de/dalu/gomanager/ent/predicate"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
)
|
||||
|
||||
// LogentryDelete is the builder for deleting a Logentry entity.
|
||||
type LogentryDelete struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *LogentryMutation
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the LogentryDelete builder.
|
||||
func (ld *LogentryDelete) Where(ps ...predicate.Logentry) *LogentryDelete {
|
||||
ld.mutation.Where(ps...)
|
||||
return ld
|
||||
}
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (ld *LogentryDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, ld.sqlExec, ld.mutation, ld.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (ld *LogentryDelete) ExecX(ctx context.Context) int {
|
||||
n, err := ld.Exec(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (ld *LogentryDelete) sqlExec(ctx context.Context) (int, error) {
|
||||
_spec := sqlgraph.NewDeleteSpec(logentry.Table, sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt))
|
||||
if ps := ld.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
affected, err := sqlgraph.DeleteNodes(ctx, ld.driver, _spec)
|
||||
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
ld.mutation.done = true
|
||||
return affected, err
|
||||
}
|
||||
|
||||
// LogentryDeleteOne is the builder for deleting a single Logentry entity.
|
||||
type LogentryDeleteOne struct {
|
||||
ld *LogentryDelete
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the LogentryDelete builder.
|
||||
func (ldo *LogentryDeleteOne) Where(ps ...predicate.Logentry) *LogentryDeleteOne {
|
||||
ldo.ld.mutation.Where(ps...)
|
||||
return ldo
|
||||
}
|
||||
|
||||
// Exec executes the deletion query.
|
||||
func (ldo *LogentryDeleteOne) Exec(ctx context.Context) error {
|
||||
n, err := ldo.ld.Exec(ctx)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case n == 0:
|
||||
return &NotFoundError{logentry.Label}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (ldo *LogentryDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := ldo.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
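// exampleDeleteByContent is an illustrative sketch (not part of the generated
// code): a predicate-driven bulk delete that reports how many rows were removed.
func exampleDeleteByContent(ctx context.Context, c *Client, substr string) (int, error) {
	return c.Logentry.Delete().
		Where(logentry.ContentContains(substr)).
		Exec(ctx)
}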
|
613
ent/logentry_query.go
Normal file
@ -0,0 +1,613 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/logentry"
|
||||
"code.icod.de/dalu/gomanager/ent/predicate"
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
)
|
||||
|
||||
// LogentryQuery is the builder for querying Logentry entities.
|
||||
type LogentryQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []logentry.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.Logentry
|
||||
withProject *ProjectQuery
|
||||
withFKs bool
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the LogentryQuery builder.
|
||||
func (lq *LogentryQuery) Where(ps ...predicate.Logentry) *LogentryQuery {
|
||||
lq.predicates = append(lq.predicates, ps...)
|
||||
return lq
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (lq *LogentryQuery) Limit(limit int) *LogentryQuery {
|
||||
lq.ctx.Limit = &limit
|
||||
return lq
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (lq *LogentryQuery) Offset(offset int) *LogentryQuery {
|
||||
lq.ctx.Offset = &offset
|
||||
return lq
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (lq *LogentryQuery) Unique(unique bool) *LogentryQuery {
|
||||
lq.ctx.Unique = &unique
|
||||
return lq
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (lq *LogentryQuery) Order(o ...logentry.OrderOption) *LogentryQuery {
|
||||
lq.order = append(lq.order, o...)
|
||||
return lq
|
||||
}
|
||||
|
||||
// QueryProject chains the current query on the "project" edge.
|
||||
func (lq *LogentryQuery) QueryProject() *ProjectQuery {
|
||||
query := (&ProjectClient{config: lq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := lq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := lq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(logentry.Table, logentry.FieldID, selector),
|
||||
sqlgraph.To(project.Table, project.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, logentry.ProjectTable, logentry.ProjectColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(lq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first Logentry entity from the query.
|
||||
// Returns a *NotFoundError when no Logentry was found.
|
||||
func (lq *LogentryQuery) First(ctx context.Context) (*Logentry, error) {
|
||||
nodes, err := lq.Limit(1).All(setContextOp(ctx, lq.ctx, "First"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{logentry.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (lq *LogentryQuery) FirstX(ctx context.Context) *Logentry {
|
||||
node, err := lq.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first Logentry ID from the query.
|
||||
// Returns a *NotFoundError when no Logentry ID was found.
|
||||
func (lq *LogentryQuery) FirstID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = lq.Limit(1).IDs(setContextOp(ctx, lq.ctx, "FirstID")); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{logentry.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (lq *LogentryQuery) FirstIDX(ctx context.Context) int {
|
||||
id, err := lq.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single Logentry entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one Logentry entity is found.
|
||||
// Returns a *NotFoundError when no Logentry entities are found.
|
||||
func (lq *LogentryQuery) Only(ctx context.Context) (*Logentry, error) {
|
||||
nodes, err := lq.Limit(2).All(setContextOp(ctx, lq.ctx, "Only"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{logentry.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{logentry.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (lq *LogentryQuery) OnlyX(ctx context.Context) *Logentry {
|
||||
node, err := lq.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only Logentry ID in the query.
|
||||
// Returns a *NotSingularError when more than one Logentry ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (lq *LogentryQuery) OnlyID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = lq.Limit(2).IDs(setContextOp(ctx, lq.ctx, "OnlyID")); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{logentry.Label}
|
||||
default:
|
||||
err = &NotSingularError{logentry.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (lq *LogentryQuery) OnlyIDX(ctx context.Context) int {
|
||||
id, err := lq.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of Logentries.
|
||||
func (lq *LogentryQuery) All(ctx context.Context) ([]*Logentry, error) {
|
||||
ctx = setContextOp(ctx, lq.ctx, "All")
|
||||
if err := lq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*Logentry, *LogentryQuery]()
|
||||
return withInterceptors[[]*Logentry](ctx, lq, qr, lq.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (lq *LogentryQuery) AllX(ctx context.Context) []*Logentry {
|
||||
nodes, err := lq.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of Logentry IDs.
|
||||
func (lq *LogentryQuery) IDs(ctx context.Context) (ids []int, err error) {
|
||||
if lq.ctx.Unique == nil && lq.path != nil {
|
||||
lq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, lq.ctx, "IDs")
|
||||
if err = lq.Select(logentry.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (lq *LogentryQuery) IDsX(ctx context.Context) []int {
|
||||
ids, err := lq.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (lq *LogentryQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, lq.ctx, "Count")
|
||||
if err := lq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, lq, querierCount[*LogentryQuery](), lq.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (lq *LogentryQuery) CountX(ctx context.Context) int {
|
||||
count, err := lq.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (lq *LogentryQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, lq.ctx, "Exist")
|
||||
switch _, err := lq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (lq *LogentryQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := lq.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the LogentryQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (lq *LogentryQuery) Clone() *LogentryQuery {
|
||||
if lq == nil {
|
||||
return nil
|
||||
}
|
||||
return &LogentryQuery{
|
||||
config: lq.config,
|
||||
ctx: lq.ctx.Clone(),
|
||||
order: append([]logentry.OrderOption{}, lq.order...),
|
||||
inters: append([]Interceptor{}, lq.inters...),
|
||||
predicates: append([]predicate.Logentry{}, lq.predicates...),
|
||||
withProject: lq.withProject.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: lq.sql.Clone(),
|
||||
path: lq.path,
|
||||
}
|
||||
}
|
||||
|
||||
// WithProject tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "project" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (lq *LogentryQuery) WithProject(opts ...func(*ProjectQuery)) *LogentryQuery {
|
||||
query := (&ProjectClient{config: lq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
lq.withProject = query
|
||||
return lq
|
||||
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// Date time.Time `json:"date,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.Logentry.Query().
|
||||
// GroupBy(logentry.FieldDate).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (lq *LogentryQuery) GroupBy(field string, fields ...string) *LogentryGroupBy {
|
||||
lq.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &LogentryGroupBy{build: lq}
|
||||
grbuild.flds = &lq.ctx.Fields
|
||||
grbuild.label = logentry.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection of one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// Date time.Time `json:"date,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.Logentry.Query().
|
||||
// Select(logentry.FieldDate).
|
||||
// Scan(ctx, &v)
|
||||
func (lq *LogentryQuery) Select(fields ...string) *LogentrySelect {
|
||||
lq.ctx.Fields = append(lq.ctx.Fields, fields...)
|
||||
sbuild := &LogentrySelect{LogentryQuery: lq}
|
||||
sbuild.label = logentry.Label
|
||||
sbuild.flds, sbuild.scan = &lq.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a LogentrySelect configured with the given aggregations.
|
||||
func (lq *LogentryQuery) Aggregate(fns ...AggregateFunc) *LogentrySelect {
|
||||
return lq.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
func (lq *LogentryQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range lq.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
}
|
||||
if trv, ok := inter.(Traverser); ok {
|
||||
if err := trv.Traverse(ctx, lq); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, f := range lq.ctx.Fields {
|
||||
if !logentry.ValidColumn(f) {
|
||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
}
|
||||
if lq.path != nil {
|
||||
prev, err := lq.path(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lq.sql = prev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lq *LogentryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Logentry, error) {
|
||||
var (
|
||||
nodes = []*Logentry{}
|
||||
withFKs = lq.withFKs
|
||||
_spec = lq.querySpec()
|
||||
loadedTypes = [1]bool{
|
||||
lq.withProject != nil,
|
||||
}
|
||||
)
|
||||
if lq.withProject != nil {
|
||||
withFKs = true
|
||||
}
|
||||
if withFKs {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, logentry.ForeignKeys...)
|
||||
}
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*Logentry).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &Logentry{config: lq.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, lq.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := lq.withProject; query != nil {
|
||||
if err := lq.loadProject(ctx, query, nodes, nil,
|
||||
func(n *Logentry, e *Project) { n.Edges.Project = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (lq *LogentryQuery) loadProject(ctx context.Context, query *ProjectQuery, nodes []*Logentry, init func(*Logentry), assign func(*Logentry, *Project)) error {
|
||||
ids := make([]int, 0, len(nodes))
|
||||
nodeids := make(map[int][]*Logentry)
|
||||
for i := range nodes {
|
||||
if nodes[i].project_logentries == nil {
|
||||
continue
|
||||
}
|
||||
fk := *nodes[i].project_logentries
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(project.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "project_logentries" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lq *LogentryQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := lq.querySpec()
|
||||
_spec.Node.Columns = lq.ctx.Fields
|
||||
if len(lq.ctx.Fields) > 0 {
|
||||
_spec.Unique = lq.ctx.Unique != nil && *lq.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, lq.driver, _spec)
|
||||
}
|
||||
|
||||
func (lq *LogentryQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(logentry.Table, logentry.Columns, sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt))
|
||||
_spec.From = lq.sql
|
||||
if unique := lq.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if lq.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := lq.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, logentry.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != logentry.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
if ps := lq.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := lq.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := lq.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := lq.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (lq *LogentryQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(lq.driver.Dialect())
|
||||
t1 := builder.Table(logentry.Table)
|
||||
columns := lq.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = logentry.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if lq.sql != nil {
|
||||
selector = lq.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if lq.ctx.Unique != nil && *lq.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, p := range lq.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range lq.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := lq.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
// with default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := lq.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// LogentryGroupBy is the group-by builder for Logentry entities.
|
||||
type LogentryGroupBy struct {
|
||||
selector
|
||||
build *LogentryQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (lgb *LogentryGroupBy) Aggregate(fns ...AggregateFunc) *LogentryGroupBy {
|
||||
lgb.fns = append(lgb.fns, fns...)
|
||||
return lgb
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (lgb *LogentryGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, lgb.build.ctx, "GroupBy")
|
||||
if err := lgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*LogentryQuery, *LogentryGroupBy](ctx, lgb.build, lgb, lgb.build.inters, v)
|
||||
}
|
||||
|
||||
func (lgb *LogentryGroupBy) sqlScan(ctx context.Context, root *LogentryQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(lgb.fns))
|
||||
for _, fn := range lgb.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(*lgb.flds)+len(lgb.fns))
|
||||
for _, f := range *lgb.flds {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
columns = append(columns, aggregation...)
|
||||
selector.Select(columns...)
|
||||
}
|
||||
selector.GroupBy(selector.Columns(*lgb.flds...)...)
|
||||
if err := selector.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := lgb.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// LogentrySelect is the builder for selecting fields of Logentry entities.
|
||||
type LogentrySelect struct {
|
||||
*LogentryQuery
|
||||
selector
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (ls *LogentrySelect) Aggregate(fns ...AggregateFunc) *LogentrySelect {
|
||||
ls.fns = append(ls.fns, fns...)
|
||||
return ls
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (ls *LogentrySelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, ls.ctx, "Select")
|
||||
if err := ls.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*LogentryQuery, *LogentrySelect](ctx, ls.LogentryQuery, ls, ls.inters, v)
|
||||
}
|
||||
|
||||
func (ls *LogentrySelect) sqlScan(ctx context.Context, root *LogentryQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(ls.fns))
|
||||
for _, fn := range ls.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
switch n := len(*ls.selector.flds); {
|
||||
case n == 0 && len(aggregation) > 0:
|
||||
selector.Select(aggregation...)
|
||||
case n != 0 && len(aggregation) > 0:
|
||||
selector.AppendSelect(aggregation...)
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := ls.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
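Illustrative sketch (not part of the commit): how the LogentryQuery builder above is typically driven through the generated client. The ByDate order helper and the *ent.Client type are the ones ent generates for this schema and are assumed here, as is the sql.OrderDesc option from entgo.io/ent/dialect/sql.
package example

import (
	"context"

	"code.icod.de/dalu/gomanager/ent"
	"code.icod.de/dalu/gomanager/ent/logentry"
	"entgo.io/ent/dialect/sql"
)

// LatestEntries returns the ten most recent log entries with their
// project edge eager-loaded, using the query builder generated above.
func LatestEntries(ctx context.Context, client *ent.Client) ([]*ent.Logentry, error) {
	return client.Logentry.Query().
		WithProject().
		Order(logentry.ByDate(sql.OrderDesc())).
		Limit(10).
		All(ctx)
}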
318
ent/logentry_update.go
Normal file
@ -0,0 +1,318 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/logentry"
|
||||
"code.icod.de/dalu/gomanager/ent/predicate"
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
)
|
||||
|
||||
// LogentryUpdate is the builder for updating Logentry entities.
|
||||
type LogentryUpdate struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *LogentryMutation
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the LogentryUpdate builder.
|
||||
func (lu *LogentryUpdate) Where(ps ...predicate.Logentry) *LogentryUpdate {
|
||||
lu.mutation.Where(ps...)
|
||||
return lu
|
||||
}
|
||||
|
||||
// SetContent sets the "content" field.
|
||||
func (lu *LogentryUpdate) SetContent(s string) *LogentryUpdate {
|
||||
lu.mutation.SetContent(s)
|
||||
return lu
|
||||
}
|
||||
|
||||
// SetNillableContent sets the "content" field if the given value is not nil.
|
||||
func (lu *LogentryUpdate) SetNillableContent(s *string) *LogentryUpdate {
|
||||
if s != nil {
|
||||
lu.SetContent(*s)
|
||||
}
|
||||
return lu
|
||||
}
|
||||
|
||||
// SetProjectID sets the "project" edge to the Project entity by ID.
|
||||
func (lu *LogentryUpdate) SetProjectID(id int) *LogentryUpdate {
|
||||
lu.mutation.SetProjectID(id)
|
||||
return lu
|
||||
}
|
||||
|
||||
// SetNillableProjectID sets the "project" edge to the Project entity by ID if the given value is not nil.
|
||||
func (lu *LogentryUpdate) SetNillableProjectID(id *int) *LogentryUpdate {
|
||||
if id != nil {
|
||||
lu = lu.SetProjectID(*id)
|
||||
}
|
||||
return lu
|
||||
}
|
||||
|
||||
// SetProject sets the "project" edge to the Project entity.
|
||||
func (lu *LogentryUpdate) SetProject(p *Project) *LogentryUpdate {
|
||||
return lu.SetProjectID(p.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the LogentryMutation object of the builder.
|
||||
func (lu *LogentryUpdate) Mutation() *LogentryMutation {
|
||||
return lu.mutation
|
||||
}
|
||||
|
||||
// ClearProject clears the "project" edge to the Project entity.
|
||||
func (lu *LogentryUpdate) ClearProject() *LogentryUpdate {
|
||||
lu.mutation.ClearProject()
|
||||
return lu
|
||||
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (lu *LogentryUpdate) Save(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, lu.sqlSave, lu.mutation, lu.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (lu *LogentryUpdate) SaveX(ctx context.Context) int {
|
||||
affected, err := lu.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return affected
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (lu *LogentryUpdate) Exec(ctx context.Context) error {
|
||||
_, err := lu.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (lu *LogentryUpdate) ExecX(ctx context.Context) {
|
||||
if err := lu.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (lu *LogentryUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
_spec := sqlgraph.NewUpdateSpec(logentry.Table, logentry.Columns, sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt))
|
||||
if ps := lu.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := lu.mutation.Content(); ok {
|
||||
_spec.SetField(logentry.FieldContent, field.TypeString, value)
|
||||
}
|
||||
if lu.mutation.ProjectCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: logentry.ProjectTable,
|
||||
Columns: []string{logentry.ProjectColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(project.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := lu.mutation.ProjectIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: logentry.ProjectTable,
|
||||
Columns: []string{logentry.ProjectColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(project.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if n, err = sqlgraph.UpdateNodes(ctx, lu.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{logentry.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
lu.mutation.done = true
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// LogentryUpdateOne is the builder for updating a single Logentry entity.
|
||||
type LogentryUpdateOne struct {
|
||||
config
|
||||
fields []string
|
||||
hooks []Hook
|
||||
mutation *LogentryMutation
|
||||
}
|
||||
|
||||
// SetContent sets the "content" field.
|
||||
func (luo *LogentryUpdateOne) SetContent(s string) *LogentryUpdateOne {
|
||||
luo.mutation.SetContent(s)
|
||||
return luo
|
||||
}
|
||||
|
||||
// SetNillableContent sets the "content" field if the given value is not nil.
|
||||
func (luo *LogentryUpdateOne) SetNillableContent(s *string) *LogentryUpdateOne {
|
||||
if s != nil {
|
||||
luo.SetContent(*s)
|
||||
}
|
||||
return luo
|
||||
}
|
||||
|
||||
// SetProjectID sets the "project" edge to the Project entity by ID.
|
||||
func (luo *LogentryUpdateOne) SetProjectID(id int) *LogentryUpdateOne {
|
||||
luo.mutation.SetProjectID(id)
|
||||
return luo
|
||||
}
|
||||
|
||||
// SetNillableProjectID sets the "project" edge to the Project entity by ID if the given value is not nil.
|
||||
func (luo *LogentryUpdateOne) SetNillableProjectID(id *int) *LogentryUpdateOne {
|
||||
if id != nil {
|
||||
luo = luo.SetProjectID(*id)
|
||||
}
|
||||
return luo
|
||||
}
|
||||
|
||||
// SetProject sets the "project" edge to the Project entity.
|
||||
func (luo *LogentryUpdateOne) SetProject(p *Project) *LogentryUpdateOne {
|
||||
return luo.SetProjectID(p.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the LogentryMutation object of the builder.
|
||||
func (luo *LogentryUpdateOne) Mutation() *LogentryMutation {
|
||||
return luo.mutation
|
||||
}
|
||||
|
||||
// ClearProject clears the "project" edge to the Project entity.
|
||||
func (luo *LogentryUpdateOne) ClearProject() *LogentryUpdateOne {
|
||||
luo.mutation.ClearProject()
|
||||
return luo
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the LogentryUpdate builder.
|
||||
func (luo *LogentryUpdateOne) Where(ps ...predicate.Logentry) *LogentryUpdateOne {
|
||||
luo.mutation.Where(ps...)
|
||||
return luo
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (luo *LogentryUpdateOne) Select(field string, fields ...string) *LogentryUpdateOne {
|
||||
luo.fields = append([]string{field}, fields...)
|
||||
return luo
|
||||
}
|
||||
|
||||
// Save executes the query and returns the updated Logentry entity.
|
||||
func (luo *LogentryUpdateOne) Save(ctx context.Context) (*Logentry, error) {
|
||||
return withHooks(ctx, luo.sqlSave, luo.mutation, luo.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (luo *LogentryUpdateOne) SaveX(ctx context.Context) *Logentry {
|
||||
node, err := luo.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// Exec executes the query on the entity.
|
||||
func (luo *LogentryUpdateOne) Exec(ctx context.Context) error {
|
||||
_, err := luo.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (luo *LogentryUpdateOne) ExecX(ctx context.Context) {
|
||||
if err := luo.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (luo *LogentryUpdateOne) sqlSave(ctx context.Context) (_node *Logentry, err error) {
|
||||
_spec := sqlgraph.NewUpdateSpec(logentry.Table, logentry.Columns, sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt))
|
||||
id, ok := luo.mutation.ID()
|
||||
if !ok {
|
||||
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Logentry.id" for update`)}
|
||||
}
|
||||
_spec.Node.ID.Value = id
|
||||
if fields := luo.fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, logentry.FieldID)
|
||||
for _, f := range fields {
|
||||
if !logentry.ValidColumn(f) {
|
||||
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
if f != logentry.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if ps := luo.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := luo.mutation.Content(); ok {
|
||||
_spec.SetField(logentry.FieldContent, field.TypeString, value)
|
||||
}
|
||||
if luo.mutation.ProjectCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: logentry.ProjectTable,
|
||||
Columns: []string{logentry.ProjectColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(project.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := luo.mutation.ProjectIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: logentry.ProjectTable,
|
||||
Columns: []string{logentry.ProjectColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(project.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
_node = &Logentry{config: luo.config}
|
||||
_spec.Assign = _node.assignValues
|
||||
_spec.ScanValues = _node.scanValues
|
||||
if err = sqlgraph.UpdateNode(ctx, luo.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{logentry.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
luo.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
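Illustrative sketch (not part of the commit): using the LogentryUpdate builder above through the generated client. ContentContains is the predicate helper ent generates for the "content" string field and is assumed here rather than shown in this diff.
package example

import (
	"context"

	"code.icod.de/dalu/gomanager/ent"
	"code.icod.de/dalu/gomanager/ent/logentry"
)

// RelabelEntries rewrites the content of every matching log entry and
// reports how many rows were updated, using LogentryUpdate defined above.
func RelabelEntries(ctx context.Context, client *ent.Client, needle, replacement string) (int, error) {
	return client.Logentry.Update().
		Where(logentry.ContentContains(needle)).
		SetContent(replacement).
		Save(ctx)
}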
64
ent/migrate/migrate.go
Normal file
@ -0,0 +1,64 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql/schema"
|
||||
)
|
||||
|
||||
var (
|
||||
// WithGlobalUniqueID sets the universal ids options to the migration.
|
||||
// If this option is enabled, ent migration will allocate a 1<<32 range
|
||||
// for the ids of each entity (table).
|
||||
// Note that this option cannot be applied on tables that already exist.
|
||||
WithGlobalUniqueID = schema.WithGlobalUniqueID
|
||||
// WithDropColumn sets the drop column option to the migration.
|
||||
// If this option is enabled, ent migration will drop old columns
|
||||
// that were used for both fields and edges. This defaults to false.
|
||||
WithDropColumn = schema.WithDropColumn
|
||||
// WithDropIndex sets the drop index option to the migration.
|
||||
// If this option is enabled, ent migration will drop old indexes
|
||||
// that were defined in the schema. This defaults to false.
|
||||
// Note that unique constraints are defined using `UNIQUE INDEX`,
|
||||
// and therefore, it's recommended to enable this option to get more
|
||||
// flexibility in the schema changes.
|
||||
WithDropIndex = schema.WithDropIndex
|
||||
// WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true.
|
||||
WithForeignKeys = schema.WithForeignKeys
|
||||
)
|
||||
|
||||
// Schema is the API for creating, migrating and dropping a schema.
|
||||
type Schema struct {
|
||||
drv dialect.Driver
|
||||
}
|
||||
|
||||
// NewSchema creates a new schema client.
|
||||
func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} }
|
||||
|
||||
// Create creates all schema resources.
|
||||
func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error {
|
||||
return Create(ctx, s, Tables, opts...)
|
||||
}
|
||||
|
||||
// Create creates all table resources using the given schema driver.
|
||||
func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error {
|
||||
migrate, err := schema.NewMigrate(s.drv, opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ent/migrate: %w", err)
|
||||
}
|
||||
return migrate.Create(ctx, tables...)
|
||||
}
|
||||
|
||||
// WriteTo writes the schema changes to w instead of running them against the database.
|
||||
//
|
||||
// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error {
|
||||
return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...)
|
||||
}
|
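Illustrative sketch (not part of the commit): a typical entry point for the migration API above, opening the generated client and letting Schema.Create build the tables declared in ent/migrate/schema.go. The SQLite driver import and the DSN are assumptions; any dialect supported by ent works.
package main

import (
	"context"
	"log"

	"code.icod.de/dalu/gomanager/ent"
	"code.icod.de/dalu/gomanager/ent/migrate"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	client, err := ent.Open("sqlite3", "file:gomanager?mode=memory&cache=shared&_fk=1")
	if err != nil {
		log.Fatalf("opening ent client: %v", err)
	}
	defer client.Close()

	// Create or upgrade the schema; the options are the ones exported above.
	if err := client.Schema.Create(
		context.Background(),
		migrate.WithDropIndex(true),
		migrate.WithDropColumn(true),
	); err != nil {
		log.Fatalf("running schema migration: %v", err)
	}
}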
59
ent/migrate/schema.go
Normal file
@ -0,0 +1,59 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"entgo.io/ent/dialect/sql/schema"
|
||||
"entgo.io/ent/schema/field"
|
||||
)
|
||||
|
||||
var (
|
||||
// LogentriesColumns holds the columns for the "logentries" table.
|
||||
LogentriesColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt, Increment: true},
|
||||
{Name: "date", Type: field.TypeTime},
|
||||
{Name: "content", Type: field.TypeString, Size: 2147483647},
|
||||
{Name: "project_logentries", Type: field.TypeInt, Nullable: true},
|
||||
}
|
||||
// LogentriesTable holds the schema information for the "logentries" table.
|
||||
LogentriesTable = &schema.Table{
|
||||
Name: "logentries",
|
||||
Columns: LogentriesColumns,
|
||||
PrimaryKey: []*schema.Column{LogentriesColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "logentries_projects_logentries",
|
||||
Columns: []*schema.Column{LogentriesColumns[3]},
|
||||
RefColumns: []*schema.Column{ProjectsColumns[0]},
|
||||
OnDelete: schema.SetNull,
|
||||
},
|
||||
},
|
||||
}
|
||||
// ProjectsColumns holds the columns for the "projects" table.
|
||||
ProjectsColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt, Increment: true},
|
||||
{Name: "create_time", Type: field.TypeTime},
|
||||
{Name: "user", Type: field.TypeString},
|
||||
{Name: "group", Type: field.TypeString},
|
||||
{Name: "root_path", Type: field.TypeString},
|
||||
{Name: "service_name", Type: field.TypeString},
|
||||
{Name: "binary_path", Type: field.TypeString},
|
||||
{Name: "move_to_target", Type: field.TypeBool, Default: false},
|
||||
{Name: "binary_target_path", Type: field.TypeString, Nullable: true},
|
||||
}
|
||||
// ProjectsTable holds the schema information for the "projects" table.
|
||||
ProjectsTable = &schema.Table{
|
||||
Name: "projects",
|
||||
Columns: ProjectsColumns,
|
||||
PrimaryKey: []*schema.Column{ProjectsColumns[0]},
|
||||
}
|
||||
// Tables holds all the tables in the schema.
|
||||
Tables = []*schema.Table{
|
||||
LogentriesTable,
|
||||
ProjectsTable,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
LogentriesTable.ForeignKeys[0].RefTable = ProjectsTable
|
||||
}
|
1308
ent/mutation.go
Normal file
File diff suppressed because it is too large
13
ent/predicate/predicate.go
Normal file
@ -0,0 +1,13 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package predicate
|
||||
|
||||
import (
|
||||
"entgo.io/ent/dialect/sql"
|
||||
)
|
||||
|
||||
// Logentry is the predicate function for logentry builders.
|
||||
type Logentry func(*sql.Selector)
|
||||
|
||||
// Project is the predicate function for project builders.
|
||||
type Project func(*sql.Selector)
|
211
ent/project.go
Normal file
@ -0,0 +1,211 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
)
|
||||
|
||||
// Project is the model entity for the Project schema.
|
||||
type Project struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int `json:"id,omitempty"`
|
||||
// CreateTime holds the value of the "create_time" field.
|
||||
CreateTime time.Time `json:"create_time,omitempty"`
|
||||
// User holds the value of the "user" field.
|
||||
User string `json:"user,omitempty"`
|
||||
// Group holds the value of the "group" field.
|
||||
Group string `json:"group,omitempty"`
|
||||
// RootPath holds the value of the "root_path" field.
|
||||
RootPath string `json:"root_path,omitempty"`
|
||||
// ServiceName holds the value of the "service_name" field.
|
||||
ServiceName string `json:"service_name,omitempty"`
|
||||
// BinaryPath holds the value of the "binary_path" field.
|
||||
BinaryPath string `json:"binary_path,omitempty"`
|
||||
// MoveToTarget holds the value of the "move_to_target" field.
|
||||
MoveToTarget bool `json:"move_to_target,omitempty"`
|
||||
// BinaryTargetPath holds the value of the "binary_target_path" field.
|
||||
BinaryTargetPath string `json:"binary_target_path,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the ProjectQuery when eager-loading is set.
|
||||
Edges ProjectEdges `json:"edges"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// ProjectEdges holds the relations/edges for other nodes in the graph.
|
||||
type ProjectEdges struct {
|
||||
// Logentries holds the value of the logentries edge.
|
||||
Logentries []*Logentry `json:"logentries,omitempty"`
|
||||
// loadedTypes holds the information for reporting if a
|
||||
// type was loaded (or requested) in eager-loading or not.
|
||||
loadedTypes [1]bool
|
||||
}
|
||||
|
||||
// LogentriesOrErr returns the Logentries value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e ProjectEdges) LogentriesOrErr() ([]*Logentry, error) {
|
||||
if e.loadedTypes[0] {
|
||||
return e.Logentries, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "logentries"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*Project) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case project.FieldMoveToTarget:
|
||||
values[i] = new(sql.NullBool)
|
||||
case project.FieldID:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case project.FieldUser, project.FieldGroup, project.FieldRootPath, project.FieldServiceName, project.FieldBinaryPath, project.FieldBinaryTargetPath:
|
||||
values[i] = new(sql.NullString)
|
||||
case project.FieldCreateTime:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the Project fields.
|
||||
func (pr *Project) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case project.FieldID:
|
||||
value, ok := values[i].(*sql.NullInt64)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected type %T for field id", value)
|
||||
}
|
||||
pr.ID = int(value.Int64)
|
||||
case project.FieldCreateTime:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field create_time", values[i])
|
||||
} else if value.Valid {
|
||||
pr.CreateTime = value.Time
|
||||
}
|
||||
case project.FieldUser:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field user", values[i])
|
||||
} else if value.Valid {
|
||||
pr.User = value.String
|
||||
}
|
||||
case project.FieldGroup:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field group", values[i])
|
||||
} else if value.Valid {
|
||||
pr.Group = value.String
|
||||
}
|
||||
case project.FieldRootPath:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field root_path", values[i])
|
||||
} else if value.Valid {
|
||||
pr.RootPath = value.String
|
||||
}
|
||||
case project.FieldServiceName:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field service_name", values[i])
|
||||
} else if value.Valid {
|
||||
pr.ServiceName = value.String
|
||||
}
|
||||
case project.FieldBinaryPath:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field binary_path", values[i])
|
||||
} else if value.Valid {
|
||||
pr.BinaryPath = value.String
|
||||
}
|
||||
case project.FieldMoveToTarget:
|
||||
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field move_to_target", values[i])
|
||||
} else if value.Valid {
|
||||
pr.MoveToTarget = value.Bool
|
||||
}
|
||||
case project.FieldBinaryTargetPath:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field binary_target_path", values[i])
|
||||
} else if value.Valid {
|
||||
pr.BinaryTargetPath = value.String
|
||||
}
|
||||
default:
|
||||
pr.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the Project.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (pr *Project) Value(name string) (ent.Value, error) {
|
||||
return pr.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryLogentries queries the "logentries" edge of the Project entity.
|
||||
func (pr *Project) QueryLogentries() *LogentryQuery {
|
||||
return NewProjectClient(pr.config).QueryLogentries(pr)
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this Project.
|
||||
// Note that you need to call Project.Unwrap() before calling this method if this Project
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (pr *Project) Update() *ProjectUpdateOne {
|
||||
return NewProjectClient(pr.config).UpdateOne(pr)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the Project entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (pr *Project) Unwrap() *Project {
|
||||
_tx, ok := pr.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: Project is not a transactional entity")
|
||||
}
|
||||
pr.config.driver = _tx.drv
|
||||
return pr
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (pr *Project) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("Project(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", pr.ID))
|
||||
builder.WriteString("create_time=")
|
||||
builder.WriteString(pr.CreateTime.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("user=")
|
||||
builder.WriteString(pr.User)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("group=")
|
||||
builder.WriteString(pr.Group)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("root_path=")
|
||||
builder.WriteString(pr.RootPath)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("service_name=")
|
||||
builder.WriteString(pr.ServiceName)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("binary_path=")
|
||||
builder.WriteString(pr.BinaryPath)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("move_to_target=")
|
||||
builder.WriteString(fmt.Sprintf("%v", pr.MoveToTarget))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("binary_target_path=")
|
||||
builder.WriteString(pr.BinaryTargetPath)
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// Projects is a parsable slice of Project.
|
||||
type Projects []*Project
|
143
ent/project/project.go
Normal file
@ -0,0 +1,143 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package project
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
|
||||
// Label holds the string label denoting the project type in the database.
|
||||
Label = "project"
|
||||
// FieldID holds the string denoting the id field in the database.
|
||||
FieldID = "id"
|
||||
// FieldCreateTime holds the string denoting the create_time field in the database.
|
||||
FieldCreateTime = "create_time"
|
||||
// FieldUser holds the string denoting the user field in the database.
|
||||
FieldUser = "user"
|
||||
// FieldGroup holds the string denoting the group field in the database.
|
||||
FieldGroup = "group"
|
||||
// FieldRootPath holds the string denoting the root_path field in the database.
|
||||
FieldRootPath = "root_path"
|
||||
// FieldServiceName holds the string denoting the service_name field in the database.
|
||||
FieldServiceName = "service_name"
|
||||
// FieldBinaryPath holds the string denoting the binary_path field in the database.
|
||||
FieldBinaryPath = "binary_path"
|
||||
// FieldMoveToTarget holds the string denoting the move_to_target field in the database.
|
||||
FieldMoveToTarget = "move_to_target"
|
||||
// FieldBinaryTargetPath holds the string denoting the binary_target_path field in the database.
|
||||
FieldBinaryTargetPath = "binary_target_path"
|
||||
// EdgeLogentries holds the string denoting the logentries edge name in mutations.
|
||||
EdgeLogentries = "logentries"
|
||||
// Table holds the table name of the project in the database.
|
||||
Table = "projects"
|
||||
// LogentriesTable is the table that holds the logentries relation/edge.
|
||||
LogentriesTable = "logentries"
|
||||
// LogentriesInverseTable is the table name for the Logentry entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "logentry" package.
|
||||
LogentriesInverseTable = "logentries"
|
||||
// LogentriesColumn is the table column denoting the logentries relation/edge.
|
||||
LogentriesColumn = "project_logentries"
|
||||
)
|
||||
|
||||
// Columns holds all SQL columns for project fields.
|
||||
var Columns = []string{
|
||||
FieldID,
|
||||
FieldCreateTime,
|
||||
FieldUser,
|
||||
FieldGroup,
|
||||
FieldRootPath,
|
||||
FieldServiceName,
|
||||
FieldBinaryPath,
|
||||
FieldMoveToTarget,
|
||||
FieldBinaryTargetPath,
|
||||
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultCreateTime holds the default value on creation for the "create_time" field.
|
||||
DefaultCreateTime func() time.Time
|
||||
// DefaultMoveToTarget holds the default value on creation for the "move_to_target" field.
|
||||
DefaultMoveToTarget bool
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the Project queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreateTime orders the results by the create_time field.
|
||||
func ByCreateTime(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreateTime, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUser orders the results by the user field.
|
||||
func ByUser(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUser, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByGroup orders the results by the group field.
|
||||
func ByGroup(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldGroup, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByRootPath orders the results by the root_path field.
|
||||
func ByRootPath(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldRootPath, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByServiceName orders the results by the service_name field.
|
||||
func ByServiceName(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldServiceName, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByBinaryPath orders the results by the binary_path field.
|
||||
func ByBinaryPath(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldBinaryPath, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByMoveToTarget orders the results by the move_to_target field.
|
||||
func ByMoveToTarget(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldMoveToTarget, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByBinaryTargetPath orders the results by the binary_target_path field.
|
||||
func ByBinaryTargetPath(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldBinaryTargetPath, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByLogentriesCount orders the results by logentries count.
|
||||
func ByLogentriesCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newLogentriesStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByLogentries orders the results by logentries terms.
|
||||
func ByLogentries(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newLogentriesStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
func newLogentriesStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(LogentriesInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, LogentriesTable, LogentriesColumn),
|
||||
)
|
||||
}
|
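Illustrative sketch (not part of the commit): the ByLogentriesCount helper defined above makes edge-based ordering available to the generated Project query builder; client and ctx are assumed to come from the surrounding application.
package example

import (
	"context"

	"code.icod.de/dalu/gomanager/ent"
	"code.icod.de/dalu/gomanager/ent/project"
	"entgo.io/ent/dialect/sql"
)

// BusiestProjects lists projects with the most log entries first,
// using the ByLogentriesCount order option defined above.
func BusiestProjects(ctx context.Context, client *ent.Client) ([]*ent.Project, error) {
	return client.Project.Query().
		Order(project.ByLogentriesCount(sql.OrderDesc())).
		All(ctx)
}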
584
ent/project/where.go
Normal file
@ -0,0 +1,584 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package project
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/predicate"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
// ID filters vertices based on their ID field.
|
||||
func ID(id int) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDEQ applies the EQ predicate on the ID field.
|
||||
func IDEQ(id int) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDNEQ applies the NEQ predicate on the ID field.
|
||||
func IDNEQ(id int) predicate.Project {
|
||||
return predicate.Project(sql.FieldNEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDIn applies the In predicate on the ID field.
|
||||
func IDIn(ids ...int) predicate.Project {
|
||||
return predicate.Project(sql.FieldIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDNotIn applies the NotIn predicate on the ID field.
|
||||
func IDNotIn(ids ...int) predicate.Project {
|
||||
return predicate.Project(sql.FieldNotIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDGT applies the GT predicate on the ID field.
|
||||
func IDGT(id int) predicate.Project {
|
||||
return predicate.Project(sql.FieldGT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDGTE applies the GTE predicate on the ID field.
|
||||
func IDGTE(id int) predicate.Project {
|
||||
return predicate.Project(sql.FieldGTE(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLT applies the LT predicate on the ID field.
|
||||
func IDLT(id int) predicate.Project {
|
||||
return predicate.Project(sql.FieldLT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLTE applies the LTE predicate on the ID field.
|
||||
func IDLTE(id int) predicate.Project {
|
||||
return predicate.Project(sql.FieldLTE(FieldID, id))
|
||||
}
|
||||
|
||||
// CreateTime applies equality check predicate on the "create_time" field. It's identical to CreateTimeEQ.
|
||||
func CreateTime(v time.Time) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldCreateTime, v))
|
||||
}
|
||||
|
||||
// User applies equality check predicate on the "user" field. It's identical to UserEQ.
|
||||
func User(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldUser, v))
|
||||
}
|
||||
|
||||
// Group applies equality check predicate on the "group" field. It's identical to GroupEQ.
|
||||
func Group(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldGroup, v))
|
||||
}
|
||||
|
||||
// RootPath applies equality check predicate on the "root_path" field. It's identical to RootPathEQ.
|
||||
func RootPath(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// ServiceName applies equality check predicate on the "service_name" field. It's identical to ServiceNameEQ.
|
||||
func ServiceName(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// BinaryPath applies equality check predicate on the "binary_path" field. It's identical to BinaryPathEQ.
|
||||
func BinaryPath(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// MoveToTarget applies equality check predicate on the "move_to_target" field. It's identical to MoveToTargetEQ.
|
||||
func MoveToTarget(v bool) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldMoveToTarget, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPath applies equality check predicate on the "binary_target_path" field. It's identical to BinaryTargetPathEQ.
|
||||
func BinaryTargetPath(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// CreateTimeEQ applies the EQ predicate on the "create_time" field.
|
||||
func CreateTimeEQ(v time.Time) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldCreateTime, v))
|
||||
}
|
||||
|
||||
// CreateTimeNEQ applies the NEQ predicate on the "create_time" field.
|
||||
func CreateTimeNEQ(v time.Time) predicate.Project {
|
||||
return predicate.Project(sql.FieldNEQ(FieldCreateTime, v))
|
||||
}
|
||||
|
||||
// CreateTimeIn applies the In predicate on the "create_time" field.
|
||||
func CreateTimeIn(vs ...time.Time) predicate.Project {
|
||||
return predicate.Project(sql.FieldIn(FieldCreateTime, vs...))
|
||||
}
|
||||
|
||||
// CreateTimeNotIn applies the NotIn predicate on the "create_time" field.
|
||||
func CreateTimeNotIn(vs ...time.Time) predicate.Project {
|
||||
return predicate.Project(sql.FieldNotIn(FieldCreateTime, vs...))
|
||||
}
|
||||
|
||||
// CreateTimeGT applies the GT predicate on the "create_time" field.
|
||||
func CreateTimeGT(v time.Time) predicate.Project {
|
||||
return predicate.Project(sql.FieldGT(FieldCreateTime, v))
|
||||
}
|
||||
|
||||
// CreateTimeGTE applies the GTE predicate on the "create_time" field.
|
||||
func CreateTimeGTE(v time.Time) predicate.Project {
|
||||
return predicate.Project(sql.FieldGTE(FieldCreateTime, v))
|
||||
}
|
||||
|
||||
// CreateTimeLT applies the LT predicate on the "create_time" field.
|
||||
func CreateTimeLT(v time.Time) predicate.Project {
|
||||
return predicate.Project(sql.FieldLT(FieldCreateTime, v))
|
||||
}
|
||||
|
||||
// CreateTimeLTE applies the LTE predicate on the "create_time" field.
|
||||
func CreateTimeLTE(v time.Time) predicate.Project {
|
||||
return predicate.Project(sql.FieldLTE(FieldCreateTime, v))
|
||||
}
|
||||
|
||||
// UserEQ applies the EQ predicate on the "user" field.
|
||||
func UserEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldUser, v))
|
||||
}
|
||||
|
||||
// UserNEQ applies the NEQ predicate on the "user" field.
|
||||
func UserNEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNEQ(FieldUser, v))
|
||||
}
|
||||
|
||||
// UserIn applies the In predicate on the "user" field.
|
||||
func UserIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldIn(FieldUser, vs...))
|
||||
}
|
||||
|
||||
// UserNotIn applies the NotIn predicate on the "user" field.
|
||||
func UserNotIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNotIn(FieldUser, vs...))
|
||||
}
|
||||
|
||||
// UserGT applies the GT predicate on the "user" field.
|
||||
func UserGT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGT(FieldUser, v))
|
||||
}
|
||||
|
||||
// UserGTE applies the GTE predicate on the "user" field.
|
||||
func UserGTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGTE(FieldUser, v))
|
||||
}
|
||||
|
||||
// UserLT applies the LT predicate on the "user" field.
|
||||
func UserLT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLT(FieldUser, v))
|
||||
}
|
||||
|
||||
// UserLTE applies the LTE predicate on the "user" field.
|
||||
func UserLTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLTE(FieldUser, v))
|
||||
}
|
||||
|
||||
// UserContains applies the Contains predicate on the "user" field.
|
||||
func UserContains(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContains(FieldUser, v))
|
||||
}
|
||||
|
||||
// UserHasPrefix applies the HasPrefix predicate on the "user" field.
|
||||
func UserHasPrefix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasPrefix(FieldUser, v))
|
||||
}
|
||||
|
||||
// UserHasSuffix applies the HasSuffix predicate on the "user" field.
|
||||
func UserHasSuffix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasSuffix(FieldUser, v))
|
||||
}
|
||||
|
||||
// UserEqualFold applies the EqualFold predicate on the "user" field.
|
||||
func UserEqualFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEqualFold(FieldUser, v))
|
||||
}
|
||||
|
||||
// UserContainsFold applies the ContainsFold predicate on the "user" field.
|
||||
func UserContainsFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContainsFold(FieldUser, v))
|
||||
}
|
||||
|
||||
// GroupEQ applies the EQ predicate on the "group" field.
|
||||
func GroupEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldGroup, v))
|
||||
}
|
||||
|
||||
// GroupNEQ applies the NEQ predicate on the "group" field.
|
||||
func GroupNEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNEQ(FieldGroup, v))
|
||||
}
|
||||
|
||||
// GroupIn applies the In predicate on the "group" field.
|
||||
func GroupIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldIn(FieldGroup, vs...))
|
||||
}
|
||||
|
||||
// GroupNotIn applies the NotIn predicate on the "group" field.
|
||||
func GroupNotIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNotIn(FieldGroup, vs...))
|
||||
}
|
||||
|
||||
// GroupGT applies the GT predicate on the "group" field.
|
||||
func GroupGT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGT(FieldGroup, v))
|
||||
}
|
||||
|
||||
// GroupGTE applies the GTE predicate on the "group" field.
|
||||
func GroupGTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGTE(FieldGroup, v))
|
||||
}
|
||||
|
||||
// GroupLT applies the LT predicate on the "group" field.
|
||||
func GroupLT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLT(FieldGroup, v))
|
||||
}
|
||||
|
||||
// GroupLTE applies the LTE predicate on the "group" field.
|
||||
func GroupLTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLTE(FieldGroup, v))
|
||||
}
|
||||
|
||||
// GroupContains applies the Contains predicate on the "group" field.
|
||||
func GroupContains(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContains(FieldGroup, v))
|
||||
}
|
||||
|
||||
// GroupHasPrefix applies the HasPrefix predicate on the "group" field.
|
||||
func GroupHasPrefix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasPrefix(FieldGroup, v))
|
||||
}
|
||||
|
||||
// GroupHasSuffix applies the HasSuffix predicate on the "group" field.
|
||||
func GroupHasSuffix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasSuffix(FieldGroup, v))
|
||||
}
|
||||
|
||||
// GroupEqualFold applies the EqualFold predicate on the "group" field.
|
||||
func GroupEqualFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEqualFold(FieldGroup, v))
|
||||
}
|
||||
|
||||
// GroupContainsFold applies the ContainsFold predicate on the "group" field.
|
||||
func GroupContainsFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContainsFold(FieldGroup, v))
|
||||
}
|
||||
|
||||
// RootPathEQ applies the EQ predicate on the "root_path" field.
|
||||
func RootPathEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// RootPathNEQ applies the NEQ predicate on the "root_path" field.
|
||||
func RootPathNEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNEQ(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// RootPathIn applies the In predicate on the "root_path" field.
|
||||
func RootPathIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldIn(FieldRootPath, vs...))
|
||||
}
|
||||
|
||||
// RootPathNotIn applies the NotIn predicate on the "root_path" field.
|
||||
func RootPathNotIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNotIn(FieldRootPath, vs...))
|
||||
}
|
||||
|
||||
// RootPathGT applies the GT predicate on the "root_path" field.
|
||||
func RootPathGT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGT(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// RootPathGTE applies the GTE predicate on the "root_path" field.
|
||||
func RootPathGTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGTE(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// RootPathLT applies the LT predicate on the "root_path" field.
|
||||
func RootPathLT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLT(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// RootPathLTE applies the LTE predicate on the "root_path" field.
|
||||
func RootPathLTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLTE(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// RootPathContains applies the Contains predicate on the "root_path" field.
|
||||
func RootPathContains(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContains(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// RootPathHasPrefix applies the HasPrefix predicate on the "root_path" field.
|
||||
func RootPathHasPrefix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasPrefix(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// RootPathHasSuffix applies the HasSuffix predicate on the "root_path" field.
|
||||
func RootPathHasSuffix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasSuffix(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// RootPathEqualFold applies the EqualFold predicate on the "root_path" field.
|
||||
func RootPathEqualFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEqualFold(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// RootPathContainsFold applies the ContainsFold predicate on the "root_path" field.
|
||||
func RootPathContainsFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContainsFold(FieldRootPath, v))
|
||||
}
|
||||
|
||||
// ServiceNameEQ applies the EQ predicate on the "service_name" field.
|
||||
func ServiceNameEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// ServiceNameNEQ applies the NEQ predicate on the "service_name" field.
|
||||
func ServiceNameNEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNEQ(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// ServiceNameIn applies the In predicate on the "service_name" field.
|
||||
func ServiceNameIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldIn(FieldServiceName, vs...))
|
||||
}
|
||||
|
||||
// ServiceNameNotIn applies the NotIn predicate on the "service_name" field.
|
||||
func ServiceNameNotIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNotIn(FieldServiceName, vs...))
|
||||
}
|
||||
|
||||
// ServiceNameGT applies the GT predicate on the "service_name" field.
|
||||
func ServiceNameGT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGT(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// ServiceNameGTE applies the GTE predicate on the "service_name" field.
|
||||
func ServiceNameGTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGTE(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// ServiceNameLT applies the LT predicate on the "service_name" field.
|
||||
func ServiceNameLT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLT(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// ServiceNameLTE applies the LTE predicate on the "service_name" field.
|
||||
func ServiceNameLTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLTE(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// ServiceNameContains applies the Contains predicate on the "service_name" field.
|
||||
func ServiceNameContains(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContains(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// ServiceNameHasPrefix applies the HasPrefix predicate on the "service_name" field.
|
||||
func ServiceNameHasPrefix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasPrefix(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// ServiceNameHasSuffix applies the HasSuffix predicate on the "service_name" field.
|
||||
func ServiceNameHasSuffix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasSuffix(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// ServiceNameEqualFold applies the EqualFold predicate on the "service_name" field.
|
||||
func ServiceNameEqualFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEqualFold(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// ServiceNameContainsFold applies the ContainsFold predicate on the "service_name" field.
|
||||
func ServiceNameContainsFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContainsFold(FieldServiceName, v))
|
||||
}
|
||||
|
||||
// BinaryPathEQ applies the EQ predicate on the "binary_path" field.
|
||||
func BinaryPathEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// BinaryPathNEQ applies the NEQ predicate on the "binary_path" field.
|
||||
func BinaryPathNEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNEQ(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// BinaryPathIn applies the In predicate on the "binary_path" field.
|
||||
func BinaryPathIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldIn(FieldBinaryPath, vs...))
|
||||
}
|
||||
|
||||
// BinaryPathNotIn applies the NotIn predicate on the "binary_path" field.
|
||||
func BinaryPathNotIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNotIn(FieldBinaryPath, vs...))
|
||||
}
|
||||
|
||||
// BinaryPathGT applies the GT predicate on the "binary_path" field.
|
||||
func BinaryPathGT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGT(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// BinaryPathGTE applies the GTE predicate on the "binary_path" field.
|
||||
func BinaryPathGTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGTE(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// BinaryPathLT applies the LT predicate on the "binary_path" field.
|
||||
func BinaryPathLT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLT(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// BinaryPathLTE applies the LTE predicate on the "binary_path" field.
|
||||
func BinaryPathLTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLTE(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// BinaryPathContains applies the Contains predicate on the "binary_path" field.
|
||||
func BinaryPathContains(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContains(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// BinaryPathHasPrefix applies the HasPrefix predicate on the "binary_path" field.
|
||||
func BinaryPathHasPrefix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasPrefix(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// BinaryPathHasSuffix applies the HasSuffix predicate on the "binary_path" field.
|
||||
func BinaryPathHasSuffix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasSuffix(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// BinaryPathEqualFold applies the EqualFold predicate on the "binary_path" field.
|
||||
func BinaryPathEqualFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEqualFold(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// BinaryPathContainsFold applies the ContainsFold predicate on the "binary_path" field.
|
||||
func BinaryPathContainsFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContainsFold(FieldBinaryPath, v))
|
||||
}
|
||||
|
||||
// MoveToTargetEQ applies the EQ predicate on the "move_to_target" field.
|
||||
func MoveToTargetEQ(v bool) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldMoveToTarget, v))
|
||||
}
|
||||
|
||||
// MoveToTargetNEQ applies the NEQ predicate on the "move_to_target" field.
|
||||
func MoveToTargetNEQ(v bool) predicate.Project {
|
||||
return predicate.Project(sql.FieldNEQ(FieldMoveToTarget, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPathEQ applies the EQ predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEQ(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPathNEQ applies the NEQ predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathNEQ(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNEQ(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPathIn applies the In predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldIn(FieldBinaryTargetPath, vs...))
|
||||
}
|
||||
|
||||
// BinaryTargetPathNotIn applies the NotIn predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathNotIn(vs ...string) predicate.Project {
|
||||
return predicate.Project(sql.FieldNotIn(FieldBinaryTargetPath, vs...))
|
||||
}
|
||||
|
||||
// BinaryTargetPathGT applies the GT predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathGT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGT(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPathGTE applies the GTE predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathGTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldGTE(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPathLT applies the LT predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathLT(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLT(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPathLTE applies the LTE predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathLTE(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldLTE(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPathContains applies the Contains predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathContains(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContains(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPathHasPrefix applies the HasPrefix predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathHasPrefix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasPrefix(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPathHasSuffix applies the HasSuffix predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathHasSuffix(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldHasSuffix(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPathIsNil applies the IsNil predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathIsNil() predicate.Project {
|
||||
return predicate.Project(sql.FieldIsNull(FieldBinaryTargetPath))
|
||||
}
|
||||
|
||||
// BinaryTargetPathNotNil applies the NotNil predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathNotNil() predicate.Project {
|
||||
return predicate.Project(sql.FieldNotNull(FieldBinaryTargetPath))
|
||||
}
|
||||
|
||||
// BinaryTargetPathEqualFold applies the EqualFold predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathEqualFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldEqualFold(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// BinaryTargetPathContainsFold applies the ContainsFold predicate on the "binary_target_path" field.
|
||||
func BinaryTargetPathContainsFold(v string) predicate.Project {
|
||||
return predicate.Project(sql.FieldContainsFold(FieldBinaryTargetPath, v))
|
||||
}
|
||||
|
||||
// HasLogentries applies the HasEdge predicate on the "logentries" edge.
|
||||
func HasLogentries() predicate.Project {
|
||||
return predicate.Project(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, LogentriesTable, LogentriesColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasLogentriesWith applies the HasEdge predicate on the "logentries" edge with the given conditions (other predicates).
|
||||
func HasLogentriesWith(preds ...predicate.Logentry) predicate.Project {
|
||||
return predicate.Project(func(s *sql.Selector) {
|
||||
step := newLogentriesStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.Project) predicate.Project {
|
||||
return predicate.Project(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.Project) predicate.Project {
|
||||
return predicate.Project(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.Project) predicate.Project {
|
||||
return predicate.Project(sql.NotPredicates(p))
|
||||
}
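Illustrative usage sketch, not part of the generated file: the predicates above are meant to be composed inside a query's Where clause. The example package, the initialized *ent.Client (client) and the context.Context (ctx) are assumptions for illustration only.

// Example sketch: composing generated Project predicates.
package example

import (
	"context"

	"code.icod.de/dalu/gomanager/ent"
	"code.icod.de/dalu/gomanager/ent/project"
)

// findDeployable returns projects marked for moving to a target, whose service
// name starts with "api-", and that have at least one attached log entry.
func findDeployable(ctx context.Context, client *ent.Client) ([]*ent.Project, error) {
	return client.Project.Query().
		Where(project.And(
			project.MoveToTargetEQ(true),         // bool field predicate
			project.ServiceNameHasPrefix("api-"), // string field predicate
			project.HasLogentries(),              // edge predicate
		)).
		All(ctx)
}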
|
354
ent/project_create.go
Normal file
354
ent/project_create.go
Normal file
@ -0,0 +1,354 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/logentry"
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
)
|
||||
|
||||
// ProjectCreate is the builder for creating a Project entity.
|
||||
type ProjectCreate struct {
|
||||
config
|
||||
mutation *ProjectMutation
|
||||
hooks []Hook
|
||||
}
|
||||
|
||||
// SetCreateTime sets the "create_time" field.
|
||||
func (pc *ProjectCreate) SetCreateTime(t time.Time) *ProjectCreate {
|
||||
pc.mutation.SetCreateTime(t)
|
||||
return pc
|
||||
}
|
||||
|
||||
// SetNillableCreateTime sets the "create_time" field if the given value is not nil.
|
||||
func (pc *ProjectCreate) SetNillableCreateTime(t *time.Time) *ProjectCreate {
|
||||
if t != nil {
|
||||
pc.SetCreateTime(*t)
|
||||
}
|
||||
return pc
|
||||
}
|
||||
|
||||
// SetUser sets the "user" field.
|
||||
func (pc *ProjectCreate) SetUser(s string) *ProjectCreate {
|
||||
pc.mutation.SetUser(s)
|
||||
return pc
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" field.
|
||||
func (pc *ProjectCreate) SetGroup(s string) *ProjectCreate {
|
||||
pc.mutation.SetGroup(s)
|
||||
return pc
|
||||
}
|
||||
|
||||
// SetRootPath sets the "root_path" field.
|
||||
func (pc *ProjectCreate) SetRootPath(s string) *ProjectCreate {
|
||||
pc.mutation.SetRootPath(s)
|
||||
return pc
|
||||
}
|
||||
|
||||
// SetServiceName sets the "service_name" field.
|
||||
func (pc *ProjectCreate) SetServiceName(s string) *ProjectCreate {
|
||||
pc.mutation.SetServiceName(s)
|
||||
return pc
|
||||
}
|
||||
|
||||
// SetBinaryPath sets the "binary_path" field.
|
||||
func (pc *ProjectCreate) SetBinaryPath(s string) *ProjectCreate {
|
||||
pc.mutation.SetBinaryPath(s)
|
||||
return pc
|
||||
}
|
||||
|
||||
// SetMoveToTarget sets the "move_to_target" field.
|
||||
func (pc *ProjectCreate) SetMoveToTarget(b bool) *ProjectCreate {
|
||||
pc.mutation.SetMoveToTarget(b)
|
||||
return pc
|
||||
}
|
||||
|
||||
// SetNillableMoveToTarget sets the "move_to_target" field if the given value is not nil.
|
||||
func (pc *ProjectCreate) SetNillableMoveToTarget(b *bool) *ProjectCreate {
|
||||
if b != nil {
|
||||
pc.SetMoveToTarget(*b)
|
||||
}
|
||||
return pc
|
||||
}
|
||||
|
||||
// SetBinaryTargetPath sets the "binary_target_path" field.
|
||||
func (pc *ProjectCreate) SetBinaryTargetPath(s string) *ProjectCreate {
|
||||
pc.mutation.SetBinaryTargetPath(s)
|
||||
return pc
|
||||
}
|
||||
|
||||
// SetNillableBinaryTargetPath sets the "binary_target_path" field if the given value is not nil.
|
||||
func (pc *ProjectCreate) SetNillableBinaryTargetPath(s *string) *ProjectCreate {
|
||||
if s != nil {
|
||||
pc.SetBinaryTargetPath(*s)
|
||||
}
|
||||
return pc
|
||||
}
|
||||
|
||||
// SetID sets the "id" field.
|
||||
func (pc *ProjectCreate) SetID(i int) *ProjectCreate {
|
||||
pc.mutation.SetID(i)
|
||||
return pc
|
||||
}
|
||||
|
||||
// AddLogentryIDs adds the "logentries" edge to the Logentry entity by IDs.
|
||||
func (pc *ProjectCreate) AddLogentryIDs(ids ...int) *ProjectCreate {
|
||||
pc.mutation.AddLogentryIDs(ids...)
|
||||
return pc
|
||||
}
|
||||
|
||||
// AddLogentries adds the "logentries" edges to the Logentry entity.
|
||||
func (pc *ProjectCreate) AddLogentries(l ...*Logentry) *ProjectCreate {
|
||||
ids := make([]int, len(l))
|
||||
for i := range l {
|
||||
ids[i] = l[i].ID
|
||||
}
|
||||
return pc.AddLogentryIDs(ids...)
|
||||
}
|
||||
|
||||
// Mutation returns the ProjectMutation object of the builder.
|
||||
func (pc *ProjectCreate) Mutation() *ProjectMutation {
|
||||
return pc.mutation
|
||||
}
|
||||
|
||||
// Save creates the Project in the database.
|
||||
func (pc *ProjectCreate) Save(ctx context.Context) (*Project, error) {
|
||||
pc.defaults()
|
||||
return withHooks(ctx, pc.sqlSave, pc.mutation, pc.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (pc *ProjectCreate) SaveX(ctx context.Context) *Project {
|
||||
v, err := pc.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (pc *ProjectCreate) Exec(ctx context.Context) error {
|
||||
_, err := pc.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (pc *ProjectCreate) ExecX(ctx context.Context) {
|
||||
if err := pc.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
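Usage sketch (same assumed package and imports as the earlier predicate example): creating a single Project through this builder; all field values below are placeholders.

// createProject shows a typical ProjectCreate chain; the values are illustrative.
func createProject(ctx context.Context, client *ent.Client) (*ent.Project, error) {
	return client.Project.Create().
		SetUser("deploy").
		SetGroup("deploy").
		SetRootPath("/srv/projects/api").
		SetServiceName("api.service").
		SetBinaryPath("/srv/projects/api/bin/api").
		SetMoveToTarget(true).
		SetBinaryTargetPath("/usr/local/bin/api").
		Save(ctx)
}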
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (pc *ProjectCreate) defaults() {
|
||||
if _, ok := pc.mutation.CreateTime(); !ok {
|
||||
v := project.DefaultCreateTime()
|
||||
pc.mutation.SetCreateTime(v)
|
||||
}
|
||||
if _, ok := pc.mutation.MoveToTarget(); !ok {
|
||||
v := project.DefaultMoveToTarget
|
||||
pc.mutation.SetMoveToTarget(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (pc *ProjectCreate) check() error {
|
||||
if _, ok := pc.mutation.CreateTime(); !ok {
|
||||
return &ValidationError{Name: "create_time", err: errors.New(`ent: missing required field "Project.create_time"`)}
|
||||
}
|
||||
if _, ok := pc.mutation.User(); !ok {
|
||||
return &ValidationError{Name: "user", err: errors.New(`ent: missing required field "Project.user"`)}
|
||||
}
|
||||
if _, ok := pc.mutation.Group(); !ok {
|
||||
return &ValidationError{Name: "group", err: errors.New(`ent: missing required field "Project.group"`)}
|
||||
}
|
||||
if _, ok := pc.mutation.RootPath(); !ok {
|
||||
return &ValidationError{Name: "root_path", err: errors.New(`ent: missing required field "Project.root_path"`)}
|
||||
}
|
||||
if _, ok := pc.mutation.ServiceName(); !ok {
|
||||
return &ValidationError{Name: "service_name", err: errors.New(`ent: missing required field "Project.service_name"`)}
|
||||
}
|
||||
if _, ok := pc.mutation.BinaryPath(); !ok {
|
||||
return &ValidationError{Name: "binary_path", err: errors.New(`ent: missing required field "Project.binary_path"`)}
|
||||
}
|
||||
if _, ok := pc.mutation.MoveToTarget(); !ok {
|
||||
return &ValidationError{Name: "move_to_target", err: errors.New(`ent: missing required field "Project.move_to_target"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pc *ProjectCreate) sqlSave(ctx context.Context) (*Project, error) {
|
||||
if err := pc.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := pc.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, pc.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if _spec.ID.Value != _node.ID {
|
||||
id := _spec.ID.Value.(int64)
|
||||
_node.ID = int(id)
|
||||
}
|
||||
pc.mutation.id = &_node.ID
|
||||
pc.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (pc *ProjectCreate) createSpec() (*Project, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &Project{config: pc.config}
|
||||
_spec = sqlgraph.NewCreateSpec(project.Table, sqlgraph.NewFieldSpec(project.FieldID, field.TypeInt))
|
||||
)
|
||||
if id, ok := pc.mutation.ID(); ok {
|
||||
_node.ID = id
|
||||
_spec.ID.Value = id
|
||||
}
|
||||
if value, ok := pc.mutation.CreateTime(); ok {
|
||||
_spec.SetField(project.FieldCreateTime, field.TypeTime, value)
|
||||
_node.CreateTime = value
|
||||
}
|
||||
if value, ok := pc.mutation.User(); ok {
|
||||
_spec.SetField(project.FieldUser, field.TypeString, value)
|
||||
_node.User = value
|
||||
}
|
||||
if value, ok := pc.mutation.Group(); ok {
|
||||
_spec.SetField(project.FieldGroup, field.TypeString, value)
|
||||
_node.Group = value
|
||||
}
|
||||
if value, ok := pc.mutation.RootPath(); ok {
|
||||
_spec.SetField(project.FieldRootPath, field.TypeString, value)
|
||||
_node.RootPath = value
|
||||
}
|
||||
if value, ok := pc.mutation.ServiceName(); ok {
|
||||
_spec.SetField(project.FieldServiceName, field.TypeString, value)
|
||||
_node.ServiceName = value
|
||||
}
|
||||
if value, ok := pc.mutation.BinaryPath(); ok {
|
||||
_spec.SetField(project.FieldBinaryPath, field.TypeString, value)
|
||||
_node.BinaryPath = value
|
||||
}
|
||||
if value, ok := pc.mutation.MoveToTarget(); ok {
|
||||
_spec.SetField(project.FieldMoveToTarget, field.TypeBool, value)
|
||||
_node.MoveToTarget = value
|
||||
}
|
||||
if value, ok := pc.mutation.BinaryTargetPath(); ok {
|
||||
_spec.SetField(project.FieldBinaryTargetPath, field.TypeString, value)
|
||||
_node.BinaryTargetPath = value
|
||||
}
|
||||
if nodes := pc.mutation.LogentriesIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: project.LogentriesTable,
|
||||
Columns: []string{project.LogentriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// ProjectCreateBulk is the builder for creating many Project entities in bulk.
|
||||
type ProjectCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*ProjectCreate
|
||||
}
|
||||
|
||||
// Save creates the Project entities in the database.
|
||||
func (pcb *ProjectCreateBulk) Save(ctx context.Context) ([]*Project, error) {
|
||||
if pcb.err != nil {
|
||||
return nil, pcb.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(pcb.builders))
|
||||
nodes := make([]*Project, len(pcb.builders))
|
||||
mutators := make([]Mutator, len(pcb.builders))
|
||||
for i := range pcb.builders {
|
||||
func(i int, root context.Context) {
|
||||
builder := pcb.builders[i]
|
||||
builder.defaults()
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutation, ok := m.(*ProjectMutation)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
if err := builder.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, pcb.builders[i+1].mutation)
|
||||
} else {
|
||||
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||
// Invoke the actual operation on the latest mutation in the chain.
|
||||
if err = sqlgraph.BatchCreate(ctx, pcb.driver, spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &nodes[i].ID
|
||||
if specs[i].ID.Value != nil && nodes[i].ID == 0 {
|
||||
id := specs[i].ID.Value.(int64)
|
||||
nodes[i].ID = int(id)
|
||||
}
|
||||
mutation.done = true
|
||||
return nodes[i], nil
|
||||
})
|
||||
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||
mut = builder.hooks[i](mut)
|
||||
}
|
||||
mutators[i] = mut
|
||||
}(i, ctx)
|
||||
}
|
||||
if len(mutators) > 0 {
|
||||
if _, err := mutators[0].Mutate(ctx, pcb.builders[0].mutation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (pcb *ProjectCreateBulk) SaveX(ctx context.Context) []*Project {
|
||||
v, err := pcb.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (pcb *ProjectCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := pcb.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (pcb *ProjectCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := pcb.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
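A companion sketch for the bulk builder, assuming the generated ProjectClient also exposes the usual CreateBulk constructor (that method lies outside this hunk).

// createMany prepares one ProjectCreate per name and saves them in a single batch.
func createMany(ctx context.Context, client *ent.Client, names []string) ([]*ent.Project, error) {
	builders := make([]*ent.ProjectCreate, 0, len(names))
	for _, name := range names {
		builders = append(builders, client.Project.Create().
			SetUser("deploy").
			SetGroup("deploy").
			SetRootPath("/srv/projects/"+name).
			SetServiceName(name+".service").
			SetBinaryPath("/srv/projects/"+name+"/bin/"+name))
	}
	return client.Project.CreateBulk(builders...).Save(ctx) // assumed ProjectClient.CreateBulk
}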
88
ent/project_delete.go
Normal file
@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/predicate"
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
)
|
||||
|
||||
// ProjectDelete is the builder for deleting a Project entity.
|
||||
type ProjectDelete struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *ProjectMutation
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the ProjectDelete builder.
|
||||
func (pd *ProjectDelete) Where(ps ...predicate.Project) *ProjectDelete {
|
||||
pd.mutation.Where(ps...)
|
||||
return pd
|
||||
}
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (pd *ProjectDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, pd.sqlExec, pd.mutation, pd.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (pd *ProjectDelete) ExecX(ctx context.Context) int {
|
||||
n, err := pd.Exec(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (pd *ProjectDelete) sqlExec(ctx context.Context) (int, error) {
|
||||
_spec := sqlgraph.NewDeleteSpec(project.Table, sqlgraph.NewFieldSpec(project.FieldID, field.TypeInt))
|
||||
if ps := pd.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
affected, err := sqlgraph.DeleteNodes(ctx, pd.driver, _spec)
|
||||
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
pd.mutation.done = true
|
||||
return affected, err
|
||||
}
|
||||
|
||||
// ProjectDeleteOne is the builder for deleting a single Project entity.
|
||||
type ProjectDeleteOne struct {
|
||||
pd *ProjectDelete
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the ProjectDelete builder.
|
||||
func (pdo *ProjectDeleteOne) Where(ps ...predicate.Project) *ProjectDeleteOne {
|
||||
pdo.pd.mutation.Where(ps...)
|
||||
return pdo
|
||||
}
|
||||
|
||||
// Exec executes the deletion query.
|
||||
func (pdo *ProjectDeleteOne) Exec(ctx context.Context) error {
|
||||
n, err := pdo.pd.Exec(ctx)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case n == 0:
|
||||
return &NotFoundError{project.Label}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (pdo *ProjectDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := pdo.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
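Usage sketch for the delete builders above (assumptions as in the earlier examples): deleting by predicate and reading back how many rows were removed.

// deleteByService removes every project bound to the given service name.
func deleteByService(ctx context.Context, client *ent.Client, service string) (int, error) {
	return client.Project.Delete().
		Where(project.ServiceNameEQ(service)).
		Exec(ctx)
}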
606
ent/project_query.go
Normal file
@ -0,0 +1,606 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/logentry"
|
||||
"code.icod.de/dalu/gomanager/ent/predicate"
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
)
|
||||
|
||||
// ProjectQuery is the builder for querying Project entities.
|
||||
type ProjectQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []project.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.Project
|
||||
withLogentries *LogentryQuery
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the ProjectQuery builder.
|
||||
func (pq *ProjectQuery) Where(ps ...predicate.Project) *ProjectQuery {
|
||||
pq.predicates = append(pq.predicates, ps...)
|
||||
return pq
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (pq *ProjectQuery) Limit(limit int) *ProjectQuery {
|
||||
pq.ctx.Limit = &limit
|
||||
return pq
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (pq *ProjectQuery) Offset(offset int) *ProjectQuery {
|
||||
pq.ctx.Offset = &offset
|
||||
return pq
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (pq *ProjectQuery) Unique(unique bool) *ProjectQuery {
|
||||
pq.ctx.Unique = &unique
|
||||
return pq
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (pq *ProjectQuery) Order(o ...project.OrderOption) *ProjectQuery {
|
||||
pq.order = append(pq.order, o...)
|
||||
return pq
|
||||
}
|
||||
|
||||
// QueryLogentries chains the current query on the "logentries" edge.
|
||||
func (pq *ProjectQuery) QueryLogentries() *LogentryQuery {
|
||||
query := (&LogentryClient{config: pq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := pq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := pq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(project.Table, project.FieldID, selector),
|
||||
sqlgraph.To(logentry.Table, logentry.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, project.LogentriesTable, project.LogentriesColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(pq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first Project entity from the query.
|
||||
// Returns a *NotFoundError when no Project was found.
|
||||
func (pq *ProjectQuery) First(ctx context.Context) (*Project, error) {
|
||||
nodes, err := pq.Limit(1).All(setContextOp(ctx, pq.ctx, "First"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{project.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (pq *ProjectQuery) FirstX(ctx context.Context) *Project {
|
||||
node, err := pq.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first Project ID from the query.
|
||||
// Returns a *NotFoundError when no Project ID was found.
|
||||
func (pq *ProjectQuery) FirstID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = pq.Limit(1).IDs(setContextOp(ctx, pq.ctx, "FirstID")); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{project.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (pq *ProjectQuery) FirstIDX(ctx context.Context) int {
|
||||
id, err := pq.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single Project entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one Project entity is found.
|
||||
// Returns a *NotFoundError when no Project entities are found.
|
||||
func (pq *ProjectQuery) Only(ctx context.Context) (*Project, error) {
|
||||
nodes, err := pq.Limit(2).All(setContextOp(ctx, pq.ctx, "Only"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{project.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{project.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (pq *ProjectQuery) OnlyX(ctx context.Context) *Project {
|
||||
node, err := pq.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only Project ID in the query.
|
||||
// Returns a *NotSingularError when more than one Project ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (pq *ProjectQuery) OnlyID(ctx context.Context) (id int, err error) {
|
||||
var ids []int
|
||||
if ids, err = pq.Limit(2).IDs(setContextOp(ctx, pq.ctx, "OnlyID")); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{project.Label}
|
||||
default:
|
||||
err = &NotSingularError{project.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (pq *ProjectQuery) OnlyIDX(ctx context.Context) int {
|
||||
id, err := pq.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of Projects.
|
||||
func (pq *ProjectQuery) All(ctx context.Context) ([]*Project, error) {
|
||||
ctx = setContextOp(ctx, pq.ctx, "All")
|
||||
if err := pq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*Project, *ProjectQuery]()
|
||||
return withInterceptors[[]*Project](ctx, pq, qr, pq.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (pq *ProjectQuery) AllX(ctx context.Context) []*Project {
|
||||
nodes, err := pq.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of Project IDs.
|
||||
func (pq *ProjectQuery) IDs(ctx context.Context) (ids []int, err error) {
|
||||
if pq.ctx.Unique == nil && pq.path != nil {
|
||||
pq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, pq.ctx, "IDs")
|
||||
if err = pq.Select(project.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (pq *ProjectQuery) IDsX(ctx context.Context) []int {
|
||||
ids, err := pq.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (pq *ProjectQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, pq.ctx, "Count")
|
||||
if err := pq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, pq, querierCount[*ProjectQuery](), pq.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (pq *ProjectQuery) CountX(ctx context.Context) int {
|
||||
count, err := pq.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (pq *ProjectQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, pq.ctx, "Exist")
|
||||
switch _, err := pq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (pq *ProjectQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := pq.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
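For example (sketch, assumptions as before), Exist gives a cheap presence check before doing heavier work.

// hasProjectsFor reports whether any project is registered for the given user.
func hasProjectsFor(ctx context.Context, client *ent.Client, user string) (bool, error) {
	return client.Project.Query().
		Where(project.UserEQ(user)).
		Exist(ctx)
}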
|
||||
|
||||
// Clone returns a duplicate of the ProjectQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (pq *ProjectQuery) Clone() *ProjectQuery {
|
||||
if pq == nil {
|
||||
return nil
|
||||
}
|
||||
return &ProjectQuery{
|
||||
config: pq.config,
|
||||
ctx: pq.ctx.Clone(),
|
||||
order: append([]project.OrderOption{}, pq.order...),
|
||||
inters: append([]Interceptor{}, pq.inters...),
|
||||
predicates: append([]predicate.Project{}, pq.predicates...),
|
||||
withLogentries: pq.withLogentries.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: pq.sql.Clone(),
|
||||
path: pq.path,
|
||||
}
|
||||
}
|
||||
|
||||
// WithLogentries tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "logentries" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (pq *ProjectQuery) WithLogentries(opts ...func(*LogentryQuery)) *ProjectQuery {
|
||||
query := (&LogentryClient{config: pq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
pq.withLogentries = query
|
||||
return pq
|
||||
}
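Eager-loading sketch (assumptions as before); the loaded entries surface on Edges.Logentries of each returned Project.

// listWithLogs loads projects together with their log entries in two queries.
func listWithLogs(ctx context.Context, client *ent.Client) ([]*ent.Project, error) {
	projects, err := client.Project.Query().
		WithLogentries().
		All(ctx)
	if err != nil {
		return nil, err
	}
	for _, p := range projects {
		_ = p.Edges.Logentries // eager-loaded []*ent.Logentry; empty slice when none exist
	}
	return projects, nil
}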
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreateTime time.Time `json:"create_time,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.Project.Query().
|
||||
// GroupBy(project.FieldCreateTime).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (pq *ProjectQuery) GroupBy(field string, fields ...string) *ProjectGroupBy {
|
||||
pq.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &ProjectGroupBy{build: pq}
|
||||
grbuild.flds = &pq.ctx.Fields
|
||||
grbuild.label = project.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreateTime time.Time `json:"create_time,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.Project.Query().
|
||||
// Select(project.FieldCreateTime).
|
||||
// Scan(ctx, &v)
|
||||
func (pq *ProjectQuery) Select(fields ...string) *ProjectSelect {
|
||||
pq.ctx.Fields = append(pq.ctx.Fields, fields...)
|
||||
sbuild := &ProjectSelect{ProjectQuery: pq}
|
||||
sbuild.label = project.Label
|
||||
sbuild.flds, sbuild.scan = &pq.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a ProjectSelect configured with the given aggregations.
|
||||
func (pq *ProjectQuery) Aggregate(fns ...AggregateFunc) *ProjectSelect {
|
||||
return pq.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
func (pq *ProjectQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range pq.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
}
|
||||
if trv, ok := inter.(Traverser); ok {
|
||||
if err := trv.Traverse(ctx, pq); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, f := range pq.ctx.Fields {
|
||||
if !project.ValidColumn(f) {
|
||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
}
|
||||
if pq.path != nil {
|
||||
prev, err := pq.path(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pq.sql = prev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pq *ProjectQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Project, error) {
|
||||
var (
|
||||
nodes = []*Project{}
|
||||
_spec = pq.querySpec()
|
||||
loadedTypes = [1]bool{
|
||||
pq.withLogentries != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*Project).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &Project{config: pq.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, pq.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := pq.withLogentries; query != nil {
|
||||
if err := pq.loadLogentries(ctx, query, nodes,
|
||||
func(n *Project) { n.Edges.Logentries = []*Logentry{} },
|
||||
func(n *Project, e *Logentry) { n.Edges.Logentries = append(n.Edges.Logentries, e) }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (pq *ProjectQuery) loadLogentries(ctx context.Context, query *LogentryQuery, nodes []*Project, init func(*Project), assign func(*Project, *Logentry)) error {
|
||||
fks := make([]driver.Value, 0, len(nodes))
|
||||
nodeids := make(map[int]*Project)
|
||||
for i := range nodes {
|
||||
fks = append(fks, nodes[i].ID)
|
||||
nodeids[nodes[i].ID] = nodes[i]
|
||||
if init != nil {
|
||||
init(nodes[i])
|
||||
}
|
||||
}
|
||||
query.withFKs = true
|
||||
query.Where(predicate.Logentry(func(s *sql.Selector) {
|
||||
s.Where(sql.InValues(s.C(project.LogentriesColumn), fks...))
|
||||
}))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
fk := n.project_logentries
|
||||
if fk == nil {
|
||||
return fmt.Errorf(`foreign-key "project_logentries" is nil for node %v`, n.ID)
|
||||
}
|
||||
node, ok := nodeids[*fk]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected referenced foreign-key "project_logentries" returned %v for node %v`, *fk, n.ID)
|
||||
}
|
||||
assign(node, n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pq *ProjectQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := pq.querySpec()
|
||||
_spec.Node.Columns = pq.ctx.Fields
|
||||
if len(pq.ctx.Fields) > 0 {
|
||||
_spec.Unique = pq.ctx.Unique != nil && *pq.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, pq.driver, _spec)
|
||||
}
|
||||
|
||||
func (pq *ProjectQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(project.Table, project.Columns, sqlgraph.NewFieldSpec(project.FieldID, field.TypeInt))
|
||||
_spec.From = pq.sql
|
||||
if unique := pq.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if pq.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := pq.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, project.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != project.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
if ps := pq.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := pq.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := pq.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := pq.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (pq *ProjectQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(pq.driver.Dialect())
|
||||
t1 := builder.Table(project.Table)
|
||||
columns := pq.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = project.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if pq.sql != nil {
|
||||
selector = pq.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if pq.ctx.Unique != nil && *pq.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, p := range pq.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range pq.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := pq.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
// with default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := pq.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// ProjectGroupBy is the group-by builder for Project entities.
|
||||
type ProjectGroupBy struct {
|
||||
selector
|
||||
build *ProjectQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (pgb *ProjectGroupBy) Aggregate(fns ...AggregateFunc) *ProjectGroupBy {
|
||||
pgb.fns = append(pgb.fns, fns...)
|
||||
return pgb
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (pgb *ProjectGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, pgb.build.ctx, "GroupBy")
|
||||
if err := pgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*ProjectQuery, *ProjectGroupBy](ctx, pgb.build, pgb, pgb.build.inters, v)
|
||||
}
|
||||
|
||||
func (pgb *ProjectGroupBy) sqlScan(ctx context.Context, root *ProjectQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(pgb.fns))
|
||||
for _, fn := range pgb.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(*pgb.flds)+len(pgb.fns))
|
||||
for _, f := range *pgb.flds {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
columns = append(columns, aggregation...)
|
||||
selector.Select(columns...)
|
||||
}
|
||||
selector.GroupBy(selector.Columns(*pgb.flds...)...)
|
||||
if err := selector.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := pgb.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// ProjectSelect is the builder for selecting fields of Project entities.
|
||||
type ProjectSelect struct {
|
||||
*ProjectQuery
|
||||
selector
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (ps *ProjectSelect) Aggregate(fns ...AggregateFunc) *ProjectSelect {
|
||||
ps.fns = append(ps.fns, fns...)
|
||||
return ps
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (ps *ProjectSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, ps.ctx, "Select")
|
||||
if err := ps.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*ProjectQuery, *ProjectSelect](ctx, ps.ProjectQuery, ps, ps.inters, v)
|
||||
}
|
||||
|
||||
func (ps *ProjectSelect) sqlScan(ctx context.Context, root *ProjectQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(ps.fns))
|
||||
for _, fn := range ps.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
switch n := len(*ps.selector.flds); {
|
||||
case n == 0 && len(aggregation) > 0:
|
||||
selector.Select(aggregation...)
|
||||
case n != 0 && len(aggregation) > 0:
|
||||
selector.AppendSelect(aggregation...)
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := ps.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
594
ent/project_update.go
Normal file
@ -0,0 +1,594 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/logentry"
|
||||
"code.icod.de/dalu/gomanager/ent/predicate"
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
)
|
||||
|
||||
// ProjectUpdate is the builder for updating Project entities.
|
||||
type ProjectUpdate struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *ProjectMutation
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the ProjectUpdate builder.
|
||||
func (pu *ProjectUpdate) Where(ps ...predicate.Project) *ProjectUpdate {
|
||||
pu.mutation.Where(ps...)
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetUser sets the "user" field.
|
||||
func (pu *ProjectUpdate) SetUser(s string) *ProjectUpdate {
|
||||
pu.mutation.SetUser(s)
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetNillableUser sets the "user" field if the given value is not nil.
|
||||
func (pu *ProjectUpdate) SetNillableUser(s *string) *ProjectUpdate {
|
||||
if s != nil {
|
||||
pu.SetUser(*s)
|
||||
}
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" field.
|
||||
func (pu *ProjectUpdate) SetGroup(s string) *ProjectUpdate {
|
||||
pu.mutation.SetGroup(s)
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetNillableGroup sets the "group" field if the given value is not nil.
|
||||
func (pu *ProjectUpdate) SetNillableGroup(s *string) *ProjectUpdate {
|
||||
if s != nil {
|
||||
pu.SetGroup(*s)
|
||||
}
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetRootPath sets the "root_path" field.
|
||||
func (pu *ProjectUpdate) SetRootPath(s string) *ProjectUpdate {
|
||||
pu.mutation.SetRootPath(s)
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetNillableRootPath sets the "root_path" field if the given value is not nil.
|
||||
func (pu *ProjectUpdate) SetNillableRootPath(s *string) *ProjectUpdate {
|
||||
if s != nil {
|
||||
pu.SetRootPath(*s)
|
||||
}
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetServiceName sets the "service_name" field.
|
||||
func (pu *ProjectUpdate) SetServiceName(s string) *ProjectUpdate {
|
||||
pu.mutation.SetServiceName(s)
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetNillableServiceName sets the "service_name" field if the given value is not nil.
|
||||
func (pu *ProjectUpdate) SetNillableServiceName(s *string) *ProjectUpdate {
|
||||
if s != nil {
|
||||
pu.SetServiceName(*s)
|
||||
}
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetBinaryPath sets the "binary_path" field.
|
||||
func (pu *ProjectUpdate) SetBinaryPath(s string) *ProjectUpdate {
|
||||
pu.mutation.SetBinaryPath(s)
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetNillableBinaryPath sets the "binary_path" field if the given value is not nil.
|
||||
func (pu *ProjectUpdate) SetNillableBinaryPath(s *string) *ProjectUpdate {
|
||||
if s != nil {
|
||||
pu.SetBinaryPath(*s)
|
||||
}
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetMoveToTarget sets the "move_to_target" field.
|
||||
func (pu *ProjectUpdate) SetMoveToTarget(b bool) *ProjectUpdate {
|
||||
pu.mutation.SetMoveToTarget(b)
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetNillableMoveToTarget sets the "move_to_target" field if the given value is not nil.
|
||||
func (pu *ProjectUpdate) SetNillableMoveToTarget(b *bool) *ProjectUpdate {
|
||||
if b != nil {
|
||||
pu.SetMoveToTarget(*b)
|
||||
}
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetBinaryTargetPath sets the "binary_target_path" field.
|
||||
func (pu *ProjectUpdate) SetBinaryTargetPath(s string) *ProjectUpdate {
|
||||
pu.mutation.SetBinaryTargetPath(s)
|
||||
return pu
|
||||
}
|
||||
|
||||
// SetNillableBinaryTargetPath sets the "binary_target_path" field if the given value is not nil.
|
||||
func (pu *ProjectUpdate) SetNillableBinaryTargetPath(s *string) *ProjectUpdate {
|
||||
if s != nil {
|
||||
pu.SetBinaryTargetPath(*s)
|
||||
}
|
||||
return pu
|
||||
}
|
||||
|
||||
// ClearBinaryTargetPath clears the value of the "binary_target_path" field.
|
||||
func (pu *ProjectUpdate) ClearBinaryTargetPath() *ProjectUpdate {
|
||||
pu.mutation.ClearBinaryTargetPath()
|
||||
return pu
|
||||
}
|
||||
|
||||
// AddLogentryIDs adds the "logentries" edge to the Logentry entity by IDs.
|
||||
func (pu *ProjectUpdate) AddLogentryIDs(ids ...int) *ProjectUpdate {
|
||||
pu.mutation.AddLogentryIDs(ids...)
|
||||
return pu
|
||||
}
|
||||
|
||||
// AddLogentries adds the "logentries" edges to the Logentry entity.
|
||||
func (pu *ProjectUpdate) AddLogentries(l ...*Logentry) *ProjectUpdate {
|
||||
ids := make([]int, len(l))
|
||||
for i := range l {
|
||||
ids[i] = l[i].ID
|
||||
}
|
||||
return pu.AddLogentryIDs(ids...)
|
||||
}
|
||||
|
||||
// Mutation returns the ProjectMutation object of the builder.
|
||||
func (pu *ProjectUpdate) Mutation() *ProjectMutation {
|
||||
return pu.mutation
|
||||
}
|
||||
|
||||
// ClearLogentries clears all "logentries" edges to the Logentry entity.
|
||||
func (pu *ProjectUpdate) ClearLogentries() *ProjectUpdate {
|
||||
pu.mutation.ClearLogentries()
|
||||
return pu
|
||||
}
|
||||
|
||||
// RemoveLogentryIDs removes the "logentries" edge to Logentry entities by IDs.
|
||||
func (pu *ProjectUpdate) RemoveLogentryIDs(ids ...int) *ProjectUpdate {
|
||||
pu.mutation.RemoveLogentryIDs(ids...)
|
||||
return pu
|
||||
}
|
||||
|
||||
// RemoveLogentries removes "logentries" edges to Logentry entities.
|
||||
func (pu *ProjectUpdate) RemoveLogentries(l ...*Logentry) *ProjectUpdate {
|
||||
ids := make([]int, len(l))
|
||||
for i := range l {
|
||||
ids[i] = l[i].ID
|
||||
}
|
||||
return pu.RemoveLogentryIDs(ids...)
|
||||
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (pu *ProjectUpdate) Save(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, pu.sqlSave, pu.mutation, pu.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (pu *ProjectUpdate) SaveX(ctx context.Context) int {
|
||||
affected, err := pu.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return affected
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (pu *ProjectUpdate) Exec(ctx context.Context) error {
|
||||
_, err := pu.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (pu *ProjectUpdate) ExecX(ctx context.Context) {
|
||||
if err := pu.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (pu *ProjectUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
_spec := sqlgraph.NewUpdateSpec(project.Table, project.Columns, sqlgraph.NewFieldSpec(project.FieldID, field.TypeInt))
|
||||
if ps := pu.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := pu.mutation.User(); ok {
|
||||
_spec.SetField(project.FieldUser, field.TypeString, value)
|
||||
}
|
||||
if value, ok := pu.mutation.Group(); ok {
|
||||
_spec.SetField(project.FieldGroup, field.TypeString, value)
|
||||
}
|
||||
if value, ok := pu.mutation.RootPath(); ok {
|
||||
_spec.SetField(project.FieldRootPath, field.TypeString, value)
|
||||
}
|
||||
if value, ok := pu.mutation.ServiceName(); ok {
|
||||
_spec.SetField(project.FieldServiceName, field.TypeString, value)
|
||||
}
|
||||
if value, ok := pu.mutation.BinaryPath(); ok {
|
||||
_spec.SetField(project.FieldBinaryPath, field.TypeString, value)
|
||||
}
|
||||
if value, ok := pu.mutation.MoveToTarget(); ok {
|
||||
_spec.SetField(project.FieldMoveToTarget, field.TypeBool, value)
|
||||
}
|
||||
if value, ok := pu.mutation.BinaryTargetPath(); ok {
|
||||
_spec.SetField(project.FieldBinaryTargetPath, field.TypeString, value)
|
||||
}
|
||||
if pu.mutation.BinaryTargetPathCleared() {
|
||||
_spec.ClearField(project.FieldBinaryTargetPath, field.TypeString)
|
||||
}
|
||||
if pu.mutation.LogentriesCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: project.LogentriesTable,
|
||||
Columns: []string{project.LogentriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := pu.mutation.RemovedLogentriesIDs(); len(nodes) > 0 && !pu.mutation.LogentriesCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: project.LogentriesTable,
|
||||
Columns: []string{project.LogentriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := pu.mutation.LogentriesIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: project.LogentriesTable,
|
||||
Columns: []string{project.LogentriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if n, err = sqlgraph.UpdateNodes(ctx, pu.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{project.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
pu.mutation.done = true
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// ProjectUpdateOne is the builder for updating a single Project entity.
|
||||
type ProjectUpdateOne struct {
|
||||
config
|
||||
fields []string
|
||||
hooks []Hook
|
||||
mutation *ProjectMutation
|
||||
}
|
||||
|
||||
// SetUser sets the "user" field.
|
||||
func (puo *ProjectUpdateOne) SetUser(s string) *ProjectUpdateOne {
|
||||
puo.mutation.SetUser(s)
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetNillableUser sets the "user" field if the given value is not nil.
|
||||
func (puo *ProjectUpdateOne) SetNillableUser(s *string) *ProjectUpdateOne {
|
||||
if s != nil {
|
||||
puo.SetUser(*s)
|
||||
}
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" field.
|
||||
func (puo *ProjectUpdateOne) SetGroup(s string) *ProjectUpdateOne {
|
||||
puo.mutation.SetGroup(s)
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetNillableGroup sets the "group" field if the given value is not nil.
|
||||
func (puo *ProjectUpdateOne) SetNillableGroup(s *string) *ProjectUpdateOne {
|
||||
if s != nil {
|
||||
puo.SetGroup(*s)
|
||||
}
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetRootPath sets the "root_path" field.
|
||||
func (puo *ProjectUpdateOne) SetRootPath(s string) *ProjectUpdateOne {
|
||||
puo.mutation.SetRootPath(s)
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetNillableRootPath sets the "root_path" field if the given value is not nil.
|
||||
func (puo *ProjectUpdateOne) SetNillableRootPath(s *string) *ProjectUpdateOne {
|
||||
if s != nil {
|
||||
puo.SetRootPath(*s)
|
||||
}
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetServiceName sets the "service_name" field.
|
||||
func (puo *ProjectUpdateOne) SetServiceName(s string) *ProjectUpdateOne {
|
||||
puo.mutation.SetServiceName(s)
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetNillableServiceName sets the "service_name" field if the given value is not nil.
|
||||
func (puo *ProjectUpdateOne) SetNillableServiceName(s *string) *ProjectUpdateOne {
|
||||
if s != nil {
|
||||
puo.SetServiceName(*s)
|
||||
}
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetBinaryPath sets the "binary_path" field.
|
||||
func (puo *ProjectUpdateOne) SetBinaryPath(s string) *ProjectUpdateOne {
|
||||
puo.mutation.SetBinaryPath(s)
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetNillableBinaryPath sets the "binary_path" field if the given value is not nil.
|
||||
func (puo *ProjectUpdateOne) SetNillableBinaryPath(s *string) *ProjectUpdateOne {
|
||||
if s != nil {
|
||||
puo.SetBinaryPath(*s)
|
||||
}
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetMoveToTarget sets the "move_to_target" field.
|
||||
func (puo *ProjectUpdateOne) SetMoveToTarget(b bool) *ProjectUpdateOne {
|
||||
puo.mutation.SetMoveToTarget(b)
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetNillableMoveToTarget sets the "move_to_target" field if the given value is not nil.
|
||||
func (puo *ProjectUpdateOne) SetNillableMoveToTarget(b *bool) *ProjectUpdateOne {
|
||||
if b != nil {
|
||||
puo.SetMoveToTarget(*b)
|
||||
}
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetBinaryTargetPath sets the "binary_target_path" field.
|
||||
func (puo *ProjectUpdateOne) SetBinaryTargetPath(s string) *ProjectUpdateOne {
|
||||
puo.mutation.SetBinaryTargetPath(s)
|
||||
return puo
|
||||
}
|
||||
|
||||
// SetNillableBinaryTargetPath sets the "binary_target_path" field if the given value is not nil.
|
||||
func (puo *ProjectUpdateOne) SetNillableBinaryTargetPath(s *string) *ProjectUpdateOne {
|
||||
if s != nil {
|
||||
puo.SetBinaryTargetPath(*s)
|
||||
}
|
||||
return puo
|
||||
}
|
||||
|
||||
// ClearBinaryTargetPath clears the value of the "binary_target_path" field.
|
||||
func (puo *ProjectUpdateOne) ClearBinaryTargetPath() *ProjectUpdateOne {
|
||||
puo.mutation.ClearBinaryTargetPath()
|
||||
return puo
|
||||
}
|
||||
|
||||
// AddLogentryIDs adds the "logentries" edge to the Logentry entity by IDs.
|
||||
func (puo *ProjectUpdateOne) AddLogentryIDs(ids ...int) *ProjectUpdateOne {
|
||||
puo.mutation.AddLogentryIDs(ids...)
|
||||
return puo
|
||||
}
|
||||
|
||||
// AddLogentries adds the "logentries" edges to the Logentry entity.
|
||||
func (puo *ProjectUpdateOne) AddLogentries(l ...*Logentry) *ProjectUpdateOne {
|
||||
ids := make([]int, len(l))
|
||||
for i := range l {
|
||||
ids[i] = l[i].ID
|
||||
}
|
||||
return puo.AddLogentryIDs(ids...)
|
||||
}
|
||||
|
||||
// Mutation returns the ProjectMutation object of the builder.
|
||||
func (puo *ProjectUpdateOne) Mutation() *ProjectMutation {
|
||||
return puo.mutation
|
||||
}
|
||||
|
||||
// ClearLogentries clears all "logentries" edges to the Logentry entity.
|
||||
func (puo *ProjectUpdateOne) ClearLogentries() *ProjectUpdateOne {
|
||||
puo.mutation.ClearLogentries()
|
||||
return puo
|
||||
}
|
||||
|
||||
// RemoveLogentryIDs removes the "logentries" edge to Logentry entities by IDs.
|
||||
func (puo *ProjectUpdateOne) RemoveLogentryIDs(ids ...int) *ProjectUpdateOne {
|
||||
puo.mutation.RemoveLogentryIDs(ids...)
|
||||
return puo
|
||||
}
|
||||
|
||||
// RemoveLogentries removes "logentries" edges to Logentry entities.
|
||||
func (puo *ProjectUpdateOne) RemoveLogentries(l ...*Logentry) *ProjectUpdateOne {
|
||||
ids := make([]int, len(l))
|
||||
for i := range l {
|
||||
ids[i] = l[i].ID
|
||||
}
|
||||
return puo.RemoveLogentryIDs(ids...)
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the ProjectUpdateOne builder.
|
||||
func (puo *ProjectUpdateOne) Where(ps ...predicate.Project) *ProjectUpdateOne {
|
||||
puo.mutation.Where(ps...)
|
||||
return puo
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (puo *ProjectUpdateOne) Select(field string, fields ...string) *ProjectUpdateOne {
|
||||
puo.fields = append([]string{field}, fields...)
|
||||
return puo
|
||||
}
|
||||
|
||||
// Save executes the query and returns the updated Project entity.
|
||||
func (puo *ProjectUpdateOne) Save(ctx context.Context) (*Project, error) {
|
||||
return withHooks(ctx, puo.sqlSave, puo.mutation, puo.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (puo *ProjectUpdateOne) SaveX(ctx context.Context) *Project {
|
||||
node, err := puo.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// Exec executes the query on the entity.
|
||||
func (puo *ProjectUpdateOne) Exec(ctx context.Context) error {
|
||||
_, err := puo.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (puo *ProjectUpdateOne) ExecX(ctx context.Context) {
|
||||
if err := puo.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (puo *ProjectUpdateOne) sqlSave(ctx context.Context) (_node *Project, err error) {
|
||||
_spec := sqlgraph.NewUpdateSpec(project.Table, project.Columns, sqlgraph.NewFieldSpec(project.FieldID, field.TypeInt))
|
||||
id, ok := puo.mutation.ID()
|
||||
if !ok {
|
||||
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Project.id" for update`)}
|
||||
}
|
||||
_spec.Node.ID.Value = id
|
||||
if fields := puo.fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, project.FieldID)
|
||||
for _, f := range fields {
|
||||
if !project.ValidColumn(f) {
|
||||
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
if f != project.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if ps := puo.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := puo.mutation.User(); ok {
|
||||
_spec.SetField(project.FieldUser, field.TypeString, value)
|
||||
}
|
||||
if value, ok := puo.mutation.Group(); ok {
|
||||
_spec.SetField(project.FieldGroup, field.TypeString, value)
|
||||
}
|
||||
if value, ok := puo.mutation.RootPath(); ok {
|
||||
_spec.SetField(project.FieldRootPath, field.TypeString, value)
|
||||
}
|
||||
if value, ok := puo.mutation.ServiceName(); ok {
|
||||
_spec.SetField(project.FieldServiceName, field.TypeString, value)
|
||||
}
|
||||
if value, ok := puo.mutation.BinaryPath(); ok {
|
||||
_spec.SetField(project.FieldBinaryPath, field.TypeString, value)
|
||||
}
|
||||
if value, ok := puo.mutation.MoveToTarget(); ok {
|
||||
_spec.SetField(project.FieldMoveToTarget, field.TypeBool, value)
|
||||
}
|
||||
if value, ok := puo.mutation.BinaryTargetPath(); ok {
|
||||
_spec.SetField(project.FieldBinaryTargetPath, field.TypeString, value)
|
||||
}
|
||||
if puo.mutation.BinaryTargetPathCleared() {
|
||||
_spec.ClearField(project.FieldBinaryTargetPath, field.TypeString)
|
||||
}
|
||||
if puo.mutation.LogentriesCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: project.LogentriesTable,
|
||||
Columns: []string{project.LogentriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := puo.mutation.RemovedLogentriesIDs(); len(nodes) > 0 && !puo.mutation.LogentriesCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: project.LogentriesTable,
|
||||
Columns: []string{project.LogentriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := puo.mutation.LogentriesIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: project.LogentriesTable,
|
||||
Columns: []string{project.LogentriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(logentry.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
_node = &Project{config: puo.config}
|
||||
_spec.Assign = _node.assignValues
|
||||
_spec.ScanValues = _node.scanValues
|
||||
if err = sqlgraph.UpdateNode(ctx, puo.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{project.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
puo.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
33
ent/runtime.go
Normal file
@ -0,0 +1,33 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"code.icod.de/dalu/gomanager/ent/logentry"
|
||||
"code.icod.de/dalu/gomanager/ent/project"
|
||||
"code.icod.de/dalu/gomanager/ent/schema"
|
||||
)
|
||||
|
||||
// The init function reads all schema descriptors with runtime code
|
||||
// (default values, validators, hooks and policies) and stitches it
|
||||
// to their package variables.
|
||||
func init() {
|
||||
logentryFields := schema.Logentry{}.Fields()
|
||||
_ = logentryFields
|
||||
// logentryDescDate is the schema descriptor for date field.
|
||||
logentryDescDate := logentryFields[1].Descriptor()
|
||||
// logentry.DefaultDate holds the default value on creation for the date field.
|
||||
logentry.DefaultDate = logentryDescDate.Default.(func() time.Time)
|
||||
projectFields := schema.Project{}.Fields()
|
||||
_ = projectFields
|
||||
// projectDescCreateTime is the schema descriptor for create_time field.
|
||||
projectDescCreateTime := projectFields[1].Descriptor()
|
||||
// project.DefaultCreateTime holds the default value on creation for the create_time field.
|
||||
project.DefaultCreateTime = projectDescCreateTime.Default.(func() time.Time)
|
||||
// projectDescMoveToTarget is the schema descriptor for move_to_target field.
|
||||
projectDescMoveToTarget := projectFields[7].Descriptor()
|
||||
// project.DefaultMoveToTarget holds the default value on creation for the move_to_target field.
|
||||
project.DefaultMoveToTarget = projectDescMoveToTarget.Default.(bool)
|
||||
}
|
10
ent/runtime/runtime.go
Normal file
@ -0,0 +1,10 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package runtime
|
||||
|
||||
// The schema-stitching logic is generated in code.icod.de/dalu/gomanager/ent/runtime.go
|
||||
|
||||
const (
|
||||
Version = "v0.12.5" // Version of ent codegen.
|
||||
Sum = "h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4=" // Sum of ent codegen.
|
||||
)
|
29
ent/schema/logentry.go
Normal file
@ -0,0 +1,29 @@
|
||||
package schema
|
||||
|
||||
import (
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/schema/edge"
|
||||
"entgo.io/ent/schema/field"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Logentry holds the schema definition for the Logentry entity.
|
||||
type Logentry struct {
|
||||
ent.Schema
|
||||
}
|
||||
|
||||
// Fields of the Logentry.
|
||||
func (Logentry) Fields() []ent.Field {
|
||||
return []ent.Field{
|
||||
field.Int("id"),
|
||||
field.Time("date").Default(time.Now).Immutable(),
|
||||
field.Text("content"),
|
||||
}
|
||||
}
|
||||
|
||||
// Edges of the Logentry.
|
||||
func (Logentry) Edges() []ent.Edge {
|
||||
return []ent.Edge{
|
||||
edge.From("project", Project.Type).Ref("logentries").Unique(),
|
||||
}
|
||||
}
|
35
ent/schema/project.go
Normal file
@ -0,0 +1,35 @@
|
||||
package schema
|
||||
|
||||
import (
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/schema/edge"
|
||||
"entgo.io/ent/schema/field"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Project holds the schema definition for the Project entity.
|
||||
type Project struct {
|
||||
ent.Schema
|
||||
}
|
||||
|
||||
// Fields of the Project.
|
||||
func (Project) Fields() []ent.Field {
|
||||
return []ent.Field{
|
||||
field.Int("id"),
|
||||
field.Time("create_time").Default(time.Now).Immutable(),
|
||||
field.String("user"),
|
||||
field.String("group"),
|
||||
field.String("root_path"),
|
||||
field.String("service_name"),
|
||||
field.String("binary_path"),
|
||||
field.Bool("move_to_target").Default(false),
|
||||
field.String("binary_target_path").Optional(),
|
||||
}
|
||||
}
|
||||
|
||||
// Edges of the Project.
|
||||
func (Project) Edges() []ent.Edge {
|
||||
return []ent.Edge{
|
||||
edge.To("logentries", Logentry.Type),
|
||||
}
|
||||
}
|
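The two schema files above are the source of truth for everything the generator emitted in this commit. As a rough, hypothetical sketch (not part of the commit), opening a client against SQLite and creating a Project with an attached Logentry could look like the following; the DSN, field values, and the standalone package are assumptions for illustration only:

package main // illustrative sketch only; the repository's real entry point is main.go below

import (
	"context"
	"log"

	"code.icod.de/dalu/gomanager/ent"
	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Open a client; driver name and DSN are assumptions for this sketch.
	client, err := ent.Open("sqlite3", "file:gomanager.db?_fk=1")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	ctx := context.Background()
	// Create the project and logentry tables from the schemas above.
	if err := client.Schema.Create(ctx); err != nil {
		log.Fatal(err)
	}
	// Required string fields from ent/schema/project.go; id and create_time
	// are expected to be auto-filled (database auto-increment / schema default).
	p, err := client.Project.Create().
		SetUser("www-data").
		SetGroup("www-data").
		SetRootPath("/srv/app").
		SetServiceName("app.service").
		SetBinaryPath("app").
		Save(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Attach a Logentry through the "logentries" edge.
	if _, err := client.Logentry.Create().
		SetContent("project registered").
		SetProject(p).
		Save(ctx); err != nil {
		log.Fatal(err)
	}
}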
213
ent/tx.go
Normal file
@ -0,0 +1,213 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"entgo.io/ent/dialect"
|
||||
)
|
||||
|
||||
// Tx is a transactional client that is created by calling Client.Tx().
|
||||
type Tx struct {
|
||||
config
|
||||
// Logentry is the client for interacting with the Logentry builders.
|
||||
Logentry *LogentryClient
|
||||
// Project is the client for interacting with the Project builders.
|
||||
Project *ProjectClient
|
||||
|
||||
// lazily loaded.
|
||||
client *Client
|
||||
clientOnce sync.Once
|
||||
// ctx lives for the life of the transaction. It is
|
||||
// the same context used by the underlying connection.
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
type (
|
||||
// Committer is the interface that wraps the Commit method.
|
||||
Committer interface {
|
||||
Commit(context.Context, *Tx) error
|
||||
}
|
||||
|
||||
// The CommitFunc type is an adapter to allow the use of an ordinary
|
||||
// function as a Committer. If f is a function with the appropriate
|
||||
// signature, CommitFunc(f) is a Committer that calls f.
|
||||
CommitFunc func(context.Context, *Tx) error
|
||||
|
||||
// CommitHook defines the "commit middleware". A function that gets a Committer
|
||||
// and returns a Committer. For example:
|
||||
//
|
||||
// hook := func(next ent.Committer) ent.Committer {
|
||||
// return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error {
|
||||
// // Do some stuff before.
|
||||
// if err := next.Commit(ctx, tx); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// // Do some stuff after.
|
||||
// return nil
|
||||
// })
|
||||
// }
|
||||
//
|
||||
CommitHook func(Committer) Committer
|
||||
)
|
||||
|
||||
// Commit calls f(ctx, m).
|
||||
func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error {
|
||||
return f(ctx, tx)
|
||||
}
|
||||
|
||||
// Commit commits the transaction.
|
||||
func (tx *Tx) Commit() error {
|
||||
txDriver := tx.config.driver.(*txDriver)
|
||||
var fn Committer = CommitFunc(func(context.Context, *Tx) error {
|
||||
return txDriver.tx.Commit()
|
||||
})
|
||||
txDriver.mu.Lock()
|
||||
hooks := append([]CommitHook(nil), txDriver.onCommit...)
|
||||
txDriver.mu.Unlock()
|
||||
for i := len(hooks) - 1; i >= 0; i-- {
|
||||
fn = hooks[i](fn)
|
||||
}
|
||||
return fn.Commit(tx.ctx, tx)
|
||||
}
|
||||
|
||||
// OnCommit adds a hook to call on commit.
|
||||
func (tx *Tx) OnCommit(f CommitHook) {
|
||||
txDriver := tx.config.driver.(*txDriver)
|
||||
txDriver.mu.Lock()
|
||||
txDriver.onCommit = append(txDriver.onCommit, f)
|
||||
txDriver.mu.Unlock()
|
||||
}
|
||||
|
||||
type (
|
||||
// Rollbacker is the interface that wraps the Rollback method.
|
||||
Rollbacker interface {
|
||||
Rollback(context.Context, *Tx) error
|
||||
}
|
||||
|
||||
// The RollbackFunc type is an adapter to allow the use of an ordinary
|
||||
// function as a Rollbacker. If f is a function with the appropriate
|
||||
// signature, RollbackFunc(f) is a Rollbacker that calls f.
|
||||
RollbackFunc func(context.Context, *Tx) error
|
||||
|
||||
// RollbackHook defines the "rollback middleware". A function that gets a Rollbacker
|
||||
// and returns a Rollbacker. For example:
|
||||
//
|
||||
// hook := func(next ent.Rollbacker) ent.Rollbacker {
|
||||
// return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error {
|
||||
// // Do some stuff before.
|
||||
// if err := next.Rollback(ctx, tx); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// // Do some stuff after.
|
||||
// return nil
|
||||
// })
|
||||
// }
|
||||
//
|
||||
RollbackHook func(Rollbacker) Rollbacker
|
||||
)
|
||||
|
||||
// Rollback calls f(ctx, m).
|
||||
func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error {
|
||||
return f(ctx, tx)
|
||||
}
|
||||
|
||||
// Rollback rolls back the transaction.
|
||||
func (tx *Tx) Rollback() error {
|
||||
txDriver := tx.config.driver.(*txDriver)
|
||||
var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error {
|
||||
return txDriver.tx.Rollback()
|
||||
})
|
||||
txDriver.mu.Lock()
|
||||
hooks := append([]RollbackHook(nil), txDriver.onRollback...)
|
||||
txDriver.mu.Unlock()
|
||||
for i := len(hooks) - 1; i >= 0; i-- {
|
||||
fn = hooks[i](fn)
|
||||
}
|
||||
return fn.Rollback(tx.ctx, tx)
|
||||
}
|
||||
|
||||
// OnRollback adds a hook to call on rollback.
|
||||
func (tx *Tx) OnRollback(f RollbackHook) {
|
||||
txDriver := tx.config.driver.(*txDriver)
|
||||
txDriver.mu.Lock()
|
||||
txDriver.onRollback = append(txDriver.onRollback, f)
|
||||
txDriver.mu.Unlock()
|
||||
}
|
||||
|
||||
// Client returns a Client that binds to current transaction.
|
||||
func (tx *Tx) Client() *Client {
|
||||
tx.clientOnce.Do(func() {
|
||||
tx.client = &Client{config: tx.config}
|
||||
tx.client.init()
|
||||
})
|
||||
return tx.client
|
||||
}
|
||||
|
||||
func (tx *Tx) init() {
|
||||
tx.Logentry = NewLogentryClient(tx.config)
|
||||
tx.Project = NewProjectClient(tx.config)
|
||||
}
|
||||
|
||||
// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation.
|
||||
// The idea is to support transactions without adding any extra code to the builders.
|
||||
// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance.
|
||||
// Commit and Rollback are nop for the internal builders and the user must call one
|
||||
// of them in order to commit or rollback the transaction.
|
||||
//
|
||||
// If a closed transaction is embedded in one of the generated entities, and the entity
|
||||
// applies a query, for example: Logentry.QueryXXX(), the query will be executed
|
||||
// through the driver which created this transaction.
|
||||
//
|
||||
// Note that txDriver is not goroutine safe.
|
||||
type txDriver struct {
|
||||
// the driver we started the transaction from.
|
||||
drv dialect.Driver
|
||||
// tx is the underlying transaction.
|
||||
tx dialect.Tx
|
||||
// completion hooks.
|
||||
mu sync.Mutex
|
||||
onCommit []CommitHook
|
||||
onRollback []RollbackHook
|
||||
}
|
||||
|
||||
// newTx creates a new transactional driver.
|
||||
func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) {
|
||||
tx, err := drv.Tx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &txDriver{tx: tx, drv: drv}, nil
|
||||
}
|
||||
|
||||
// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls
|
||||
// from the internal builders. Should be called only by the internal builders.
|
||||
func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil }
|
||||
|
||||
// Dialect returns the dialect of the driver we started the transaction from.
|
||||
func (tx *txDriver) Dialect() string { return tx.drv.Dialect() }
|
||||
|
||||
// Close is a nop close.
|
||||
func (*txDriver) Close() error { return nil }
|
||||
|
||||
// Commit is a nop commit for the internal builders.
|
||||
// User must call `Tx.Commit` in order to commit the transaction.
|
||||
func (*txDriver) Commit() error { return nil }
|
||||
|
||||
// Rollback is a nop rollback for the internal builders.
|
||||
// User must call `Tx.Rollback` in order to rollback the transaction.
|
||||
func (*txDriver) Rollback() error { return nil }
|
||||
|
||||
// Exec calls tx.Exec.
|
||||
func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error {
|
||||
return tx.tx.Exec(ctx, query, args, v)
|
||||
}
|
||||
|
||||
// Query calls tx.Query.
|
||||
func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error {
|
||||
return tx.tx.Query(ctx, query, args, v)
|
||||
}
|
||||
|
||||
var _ dialect.Driver = (*txDriver)(nil)
|
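Client.Tx() returns the Tx wrapper defined above. A minimal, hypothetical sketch of grouping two builders into one transaction (assuming a *ent.Client opened as in the earlier sketch) might look like:

package example // illustrative only, not part of this commit

import (
	"context"

	"code.icod.de/dalu/gomanager/ent"
)

// createWithLog creates a Project and a related Logentry atomically:
// roll back on the first error, commit only if both writes succeed.
func createWithLog(ctx context.Context, client *ent.Client) error {
	tx, err := client.Tx(ctx)
	if err != nil {
		return err
	}
	p, err := tx.Project.Create().
		SetUser("www-data").
		SetGroup("www-data").
		SetRootPath("/srv/app").
		SetServiceName("app.service").
		SetBinaryPath("app").
		Save(ctx)
	if err != nil {
		_ = tx.Rollback()
		return err
	}
	if _, err := tx.Logentry.Create().SetContent("created in tx").SetProject(p).Save(ctx); err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}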
29
go.mod
Normal file
@ -0,0 +1,29 @@
|
||||
module code.icod.de/dalu/gomanager
|
||||
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
entgo.io/ent v0.12.5
|
||||
github.com/jedib0t/go-pretty/v6 v6.4.9
|
||||
github.com/mattn/go-sqlite3 v1.14.16
|
||||
github.com/spf13/cobra v1.8.0
|
||||
)
|
||||
|
||||
require (
|
||||
ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935 // indirect
|
||||
github.com/agext/levenshtein v1.2.1 // indirect
|
||||
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
|
||||
github.com/go-openapi/inflect v0.19.0 // indirect
|
||||
github.com/google/go-cmp v0.5.6 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/hashicorp/hcl/v2 v2.13.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/zclconf/go-cty v1.8.0 // indirect
|
||||
golang.org/x/mod v0.10.0 // indirect
|
||||
golang.org/x/sys v0.7.0 // indirect
|
||||
golang.org/x/text v0.8.0 // indirect
|
||||
)
|
91
go.sum
Normal file
@ -0,0 +1,91 @@
|
||||
ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935 h1:JnYs/y8RJ3+MiIUp+3RgyyeO48VHLAZimqiaZYnMKk8=
|
||||
ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935/go.mod h1:isZrlzJ5cpoCoKFoY9knZug7Lq4pP1cm8g3XciLZ0Pw=
|
||||
entgo.io/ent v0.12.5 h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4=
|
||||
entgo.io/ent v0.12.5/go.mod h1:Y3JVAjtlIk8xVZYSn3t3mf8xlZIn5SAOXZQxD6kKI+Q=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
|
||||
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
|
||||
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=
|
||||
github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
|
||||
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
|
||||
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc=
|
||||
github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jedib0t/go-pretty/v6 v6.4.9 h1:vZ6bjGg2eBSrJn365qlxGcaWu09Id+LHtrfDWlB2Usc=
|
||||
github.com/jedib0t/go-pretty/v6 v6.4.9/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
|
||||
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
|
||||
github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
|
||||
github.com/zclconf/go-cty v1.8.0 h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA=
|
||||
github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
10
main.go
Normal file
@ -0,0 +1,10 @@
|
||||
/*
|
||||
Copyright © 2023 Darko Luketic <info@icod.de>
|
||||
*/
|
||||
package main
|
||||
|
||||
import "code.icod.de/dalu/gomanager/cmd"
|
||||
|
||||
func main() {
|
||||
cmd.Execute()
|
||||
}
|
111
runner/runner.go
Normal file
@ -0,0 +1,111 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"code.icod.de/dalu/gomanager/ent"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Runner struct {
|
||||
client *ent.Client
|
||||
}
|
||||
|
||||
func NewRunner(client *ent.Client) *Runner {
|
||||
r := new(Runner)
|
||||
r.client = client
|
||||
return r
|
||||
}
|
||||
|
||||
// Run checks all projects sequentially and rebuilds any whose binary was compiled
// with a Go version different from the installed toolchain, fixing ownership and
// recording a log entry for each rebuild.
|
||||
func (r *Runner) Run() error {
|
||||
ms, e := r.client.Project.Query().All(context.Background())
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
if len(ms) == 0 {
|
||||
return nil
|
||||
}
|
||||
v, e := getCurrentGoVersion()
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
for _, m := range ms {
|
||||
fv, e := getVersionOfFile(fmt.Sprintf("%s/%s", m.RootPath, m.BinaryPath))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
if v != fv {
|
||||
l, e := goBuildBinary(m.RootPath)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
if e := chownBinary(m.User, m.Group, fmt.Sprintf("%s/%s", m.RootPath, m.BinaryPath)); e != nil {
|
||||
return e
|
||||
}
|
||||
_, e = r.client.Logentry.
|
||||
Create().
|
||||
SetContent(l).
|
||||
Save(context.Background())
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getCurrentGoVersion returns the version string reported by the installed Go toolchain ("go version"), or a non-nil error.
|
||||
func getCurrentGoVersion() (string, error) {
|
||||
cmd := exec.Command("go", "version")
|
||||
var out strings.Builder
|
||||
cmd.Stdout = &out
|
||||
if e := cmd.Run(); e != nil {
|
||||
return "", e
|
||||
}
|
||||
split := strings.Split(out.String(), " ")
|
||||
version := split[2]
|
||||
return version, nil
|
||||
}
|
||||
|
||||
// getVersionOfFile returns the Go version a binary was built with ("go version <file>"), or a non-nil error.
|
||||
func getVersionOfFile(file string) (string, error) {
|
||||
cmd := exec.Command("go", "version", file)
|
||||
var out strings.Builder
|
||||
cmd.Stdout = &out
|
||||
if e := cmd.Run(); e != nil {
|
||||
return "", e
|
||||
}
|
||||
split := strings.Split(out.String(), " ")
|
||||
version := split[1]
|
||||
return version, nil
|
||||
}
|
||||
|
||||
// goBuildBinary runs "go build" in the given root directory, changes back to the previous working directory, and returns the command output.
|
||||
func goBuildBinary(root string) (string, error) {
|
||||
cwd, e := os.Getwd()
|
||||
if e != nil {
|
||||
return "", e
|
||||
}
|
||||
if e := os.Chdir(root); e != nil {
|
||||
return "", e
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", "build")
|
||||
var out strings.Builder
|
||||
cmd.Stdout = &out
|
||||
if e := cmd.Run(); e != nil {
|
||||
return "", e
|
||||
}
|
||||
if e := os.Chdir(cwd); e != nil {
|
||||
return "", e
|
||||
}
|
||||
return out.String(), nil
|
||||
}
|
||||
|
||||
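// chownBinary changes the owner and group of file to user:group by shelling out to the chown command.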
func chownBinary(user, group, file string) error {
|
||||
cmd := exec.Command("chown", fmt.Sprintf("%s:%s", user, group), file)
|
||||
return cmd.Run()
|
||||
}
|
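The cmd/ package that wires all of this together is not part of this section; a hypothetical sketch of how a command could construct the Runner above and trigger a single pass might look like this (driver name and DSN are assumptions):

package example // illustrative only, not part of this commit

import (
	"log"

	"code.icod.de/dalu/gomanager/ent"
	"code.icod.de/dalu/gomanager/runner"
	_ "github.com/mattn/go-sqlite3"
)

// runOnce opens the database, builds a Runner, and performs one rebuild pass.
func runOnce() {
	client, err := ent.Open("sqlite3", "file:gomanager.db?_fk=1")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	if err := runner.NewRunner(client).Run(); err != nil {
		log.Fatal(err)
	}
}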