Documentation ¶
Index ¶
- Constants
- Variables
- func CodeGeneratorGetReverseFieldOwnerName(modelPkg *models.ModelPkg, pkgName string, pkgPath string, pkgGoPath string)
- func MultiCodeGeneratorBackRepo(modelPkg *models.ModelPkg, pkgName string, pkgGoPath string, dirPath string)
- func RemoveTargetedLines(path string, target string) error
- func ReplaceInFile(filePath string, toReplace string, replacement string) error
- type BackRepoDataSubTemplateInsertion
- type BackRepoInsertionPoint
- type BackRepoPerStructSubTemplate
- type BackRepoSubTemplateInsertion
- type DBliteInsertionPointId
- type GetInstanceDBFromInstanceSubTemplateInsertion
- type GetReverseFieldOwnerNameId
- type GetReverseFieldOwnerNameSubTemplateId
Constants ¶
View Source
const BackRepoDataTemplateCode = `// generated code - do not edit
package orm

type BackRepoData struct {
    // insertion point for slices{{` + string(rune(BackRepoDataSlice)) + `}}
}

func CopyBackRepoToBackRepoData(backRepo *BackRepoStruct, backRepoData *BackRepoData) {

    // wait till backRepo is written by commit
    backRepo.rwMutex.RLock()
    defer backRepo.rwMutex.RUnlock()

    // insertion point for slices copies{{` + string(rune(BackRepoDataSliceCopies)) + `}}
}
`
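All of these templates share one mechanism: an insertion-point constant is converted into a single (unprintable) rune and wrapped in `{{` and `}}`, which yields a marker that cannot collide with ordinary template text; the generator can then substitute each marker with per-struct code expanded from the sub-template maps shown under Variables. A minimal self-contained sketch of the idea (the marker helper, the substitution call, and the Astruct name are illustrative, not the package's actual generator code):

    package main

    import (
        "fmt"
        "strings"
    )

    // stand-in for one of the package's insertion point constants
    const BackRepoDataSlice = 0

    // marker mimics the `{{` + string(rune(id)) + `}}` convention: the id becomes
    // a one-rune placeholder inside double braces
    func marker(insertionPoint int) string {
        return "{{" + string(rune(insertionPoint)) + "}}"
    }

    func main() {
        // a fragment shaped like BackRepoDataTemplateCode
        template := "type BackRepoData struct {\n" +
            "    // insertion point for slices" + marker(BackRepoDataSlice) + "\n}"

        // per-struct code expanded from a sub-template, here for a
        // hypothetical model struct named Astruct
        perStructCode := "\n    AstructAPIs []*AstructAPI"

        fmt.Println(strings.ReplaceAll(template, marker(BackRepoDataSlice), perStructCode))
    }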
View Source
const BackRepoPerStructTemplateCode = `// generated by stacks/gong/go/models/orm_file_per_struct_back_repo.go
package orm

import (
    "database/sql"
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "log"
    "os"
    "path/filepath"
    "sort"
    "time"

    "gorm.io/gorm"

    "github.com/tealeg/xlsx/v3"

    "{{PkgPathDb}}"
    "{{PkgPathRoot}}"
)

// dummy variable to have the import declaration without compile failure (even if no code needing this import is generated)
var dummy_{{Structname}}_sql sql.NullBool
var dummy_{{Structname}}_time time.Duration
var dummy_{{Structname}}_sort sort.Float64Slice

// {{Structname}}API is the input in POST API
//
// for POST, API, one needs the fields of the model as well as the fields
// from associations ("Has One" and "Has Many") that are generated to
// fulfill the ORM requirements for associations
//
// swagger:model {{structname}}API
type {{Structname}}API struct {
    gorm.Model

    models.{{Structname}}_WOP

    // encoding of pointers
    // for API, it cannot be embedded
    {{Structname}}PointersEncoding {{Structname}}PointersEncoding
}

// {{Structname}}PointersEncoding encodes pointers to Struct and
// reverse pointers of slice of pointers to Struct
type {{Structname}}PointersEncoding struct {
    // insertion for pointer fields encoding declaration{{` + string(rune(BackRepoPointerEncodingFieldsDeclaration)) + `}}
}

// {{Structname}}DB describes a {{structname}} in the database
//
// It incorporates the GORM ID, basic fields from the model (because they can be serialized),
// the encoded version of pointers
//
// swagger:model {{structname}}DB
type {{Structname}}DB struct {
    gorm.Model

    // insertion for basic fields declaration{{` + string(rune(BackRepoBasicFieldsDeclaration)) + `}}

    // encoding of pointers
    // for GORM serialization, it is necessary to embed the Pointer Encoding declaration
    {{Structname}}PointersEncoding
}

// {{Structname}}DBs arrays {{structname}}DBs
// swagger:response {{structname}}DBsResponse
type {{Structname}}DBs []{{Structname}}DB

// {{Structname}}DBResponse provides response
// swagger:response {{structname}}DBResponse
type {{Structname}}DBResponse struct {
    {{Structname}}DB
}

// {{Structname}}WOP is a {{Structname}} without pointers (WOP is an acronym for "Without Pointers")
// it holds the same basic fields but pointers are encoded into uint
type {{Structname}}WOP struct {
    ID int{{` + string(rune(BackRepoWOPInitialIndex)) + `}}

    // insertion for WOP basic fields{{` + string(rune(BackRepoBasicAndTimeFieldsWOPDeclaration)) + `}}

    // insertion for WOP pointer fields{{` + string(rune(BackRepoPointerEncodingFieldsWOPDeclaration)) + `}}
}

var {{Structname}}_Fields = []string{
    // insertion for WOP basic fields{{` + string(rune(BackRepoBasicAndTimeFieldsName)) + `}}
}

type BackRepo{{Structname}}Struct struct {
    // stores {{Structname}}DB according to their gorm ID
    Map_{{Structname}}DBID_{{Structname}}DB map[uint]*{{Structname}}DB

    // stores {{Structname}}DB ID according to {{Structname}} address
    Map_{{Structname}}Ptr_{{Structname}}DBID map[*models.{{Structname}}]uint

    // stores {{Structname}} according to their gorm ID
    Map_{{Structname}}DBID_{{Structname}}Ptr map[uint]*models.{{Structname}}

    db db.DBInterface

    stage *models.StageStruct
}

func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) GetStage() (stage *models.StageStruct) {
    stage = backRepo{{Structname}}.stage
    return
}

func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) GetDB() db.DBInterface {
    return backRepo{{Structname}}.db
}

// Get{{Structname}}DBFrom{{Structname}}Ptr is a handy function to access the back repo instance from the stage instance
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) Get{{Structname}}DBFrom{{Structname}}Ptr({{structname}} *models.{{Structname}}) ({{structname}}DB *{{Structname}}DB) {
    id := backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]
    {{structname}}DB = backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[id]
    return
}

// BackRepo{{Structname}}.CommitPhaseOne commits all staged instances of {{Structname}} to the BackRepo
// Phase One is the creation of instance in the database if it is not yet done to get the unique ID for each staged instance
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CommitPhaseOne(stage *models.StageStruct) (Error error) {

    for {{structname}} := range stage.{{Structname}}s {
        backRepo{{Structname}}.CommitPhaseOneInstance({{structname}})
    }

    // parse all backRepo instances and check whether some instances have been unstaged
    // in this case, remove them from the back repo
    for id, {{structname}} := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr {
        if _, ok := stage.{{Structname}}s[{{structname}}]; !ok {
            backRepo{{Structname}}.CommitDeleteInstance(id)
        }
    }

    return
}

// BackRepo{{Structname}}.CommitDeleteInstance commits deletion of {{Structname}} to the BackRepo
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CommitDeleteInstance(id uint) (Error error) {

    {{structname}} := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[id]

    // {{structname}} is not staged anymore, remove {{structname}}DB
    {{structname}}DB := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[id]
    db, _ := backRepo{{Structname}}.db.Unscoped()
    _, err := db.Delete({{structname}}DB)
    if err != nil {
        log.Fatal(err)
    }

    // update stores
    delete(backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID, {{structname}})
    delete(backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr, id)
    delete(backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB, id)

    return
}

// BackRepo{{Structname}}.CommitPhaseOneInstance commits {{structname}} staged instances of {{Structname}} to the BackRepo
// Phase One is the creation of instance in the database if it is not yet done to get the unique ID for each staged instance
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CommitPhaseOneInstance({{structname}} *models.{{Structname}}) (Error error) {

    // check if the {{structname}} is not committed yet
    if _, ok := backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]; ok {
        return
    }

    // initiate {{structname}}
    var {{structname}}DB {{Structname}}DB
    {{structname}}DB.CopyBasicFieldsFrom{{Structname}}({{structname}})

    _, err := backRepo{{Structname}}.db.Create(&{{structname}}DB)
    if err != nil {
        log.Fatal(err)
    }

    // update stores
    backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}] = {{structname}}DB.ID
    backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[{{structname}}DB.ID] = {{structname}}
    backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[{{structname}}DB.ID] = &{{structname}}DB

    return
}

// BackRepo{{Structname}}.CommitPhaseTwo commits all staged instances of {{Structname}} to the BackRepo
// Phase Two is the update of instance with the field in the database
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CommitPhaseTwo(backRepo *BackRepoStruct) (Error error) {

    for idx, {{structname}} := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr {
        backRepo{{Structname}}.CommitPhaseTwoInstance(backRepo, idx, {{structname}})
    }

    return
}

// BackRepo{{Structname}}.CommitPhaseTwoInstance commits {{structname}} of models.{{Structname}} to the BackRepo
// Phase Two is the update of instance with the field in the database
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CommitPhaseTwoInstance(backRepo *BackRepoStruct, idx uint, {{structname}} *models.{{Structname}}) (Error error) {

    // fetch matching {{structname}}DB
    if {{structname}}DB, ok := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[idx]; ok {

        {{structname}}DB.CopyBasicFieldsFrom{{Structname}}({{structname}})

        // insertion point for translating pointers encodings into actual pointers{{` + string(rune(BackRepoPointerEncodingFieldsCommit)) + `}}
        _, err := backRepo{{Structname}}.db.Save({{structname}}DB)
        if err != nil {
            log.Fatal(err)
        }

    } else {
        err := errors.New(
            fmt.Sprintf("Unknown {{Structname}} instance %s", {{structname}}.Name))
        return err
    }

    return
}

// BackRepo{{Structname}}.CheckoutPhaseOne Checkouts all BackRepo instances to the Stage
//
// Phase One will result in having instances on the stage aligned with the back repo
// pointers are not initialized yet (this is for phase two)
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CheckoutPhaseOne() (Error error) {

    {{structname}}DBArray := make([]{{Structname}}DB, 0)
    _, err := backRepo{{Structname}}.db.Find(&{{structname}}DBArray)
    if err != nil {
        return err
    }

    // list of instances to be removed
    // start from the initial map on the stage and remove instances that have been checked out
    {{structname}}InstancesToBeRemovedFromTheStage := make(map[*models.{{Structname}}]any)
    for key, value := range backRepo{{Structname}}.stage.{{Structname}}s {
        {{structname}}InstancesToBeRemovedFromTheStage[key] = value
    }

    // copy orm objects to the map
    for _, {{structname}}DB := range {{structname}}DBArray {
        backRepo{{Structname}}.CheckoutPhaseOneInstance(&{{structname}}DB)

        // do not remove this instance from the stage, therefore
        // remove instance from the list of instances to be removed from the stage
        {{structname}}, ok := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[{{structname}}DB.ID]
        if ok {
            delete({{structname}}InstancesToBeRemovedFromTheStage, {{structname}})
        }
    }

    // remove from stage and back repo's 3 maps all {{structname}}s that are not in the checkout
    for {{structname}} := range {{structname}}InstancesToBeRemovedFromTheStage {
        {{structname}}.Unstage(backRepo{{Structname}}.GetStage())

        // remove instance from the back repo 3 maps
        {{structname}}ID := backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]
        delete(backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID, {{structname}})
        delete(backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB, {{structname}}ID)
        delete(backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr, {{structname}}ID)
    }

    return
}

// CheckoutPhaseOneInstance takes a {{structname}}DB that has been found in the DB, updates the backRepo and stages the
// models version of the {{structname}}DB
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CheckoutPhaseOneInstance({{structname}}DB *{{Structname}}DB) (Error error) {

    {{structname}}, ok := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[{{structname}}DB.ID]
    if !ok {
        {{structname}} = new(models.{{Structname}})

        backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[{{structname}}DB.ID] = {{structname}}
        backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}] = {{structname}}DB.ID

        // append model store with the new element
        {{structname}}.Name = {{structname}}DB.Name_Data.String
        {{structname}}.Stage(backRepo{{Structname}}.GetStage())
    }
    {{structname}}DB.CopyBasicFieldsTo{{Structname}}({{structname}})

    // in some cases, the instance might have been unstaged. It is necessary to stage it again
    {{structname}}.Stage(backRepo{{Structname}}.GetStage())

    // preserve pointer to {{structname}}DB. Otherwise, the pointer is recycled and the map of pointers
    // Map_{{Structname}}DBID_{{Structname}}DB would hold variable pointers
    {{structname}}DB_Data := *{{structname}}DB
    preservedPtrTo{{Structname}} := &{{structname}}DB_Data
    backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[{{structname}}DB.ID] = preservedPtrTo{{Structname}}

    return
}

// BackRepo{{Structname}}.CheckoutPhaseTwo Checkouts all staged instances of {{Structname}} to the BackRepo
// Phase Two is the update of instance with the field in the database
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CheckoutPhaseTwo(backRepo *BackRepoStruct) (Error error) {

    // parse all DB instance and update all pointer fields of the translated models instance
    for _, {{structname}}DB := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB {
        backRepo{{Structname}}.CheckoutPhaseTwoInstance(backRepo, {{structname}}DB)
    }
    return
}

// BackRepo{{Structname}}.CheckoutPhaseTwoInstance Checkouts staged instances of {{Structname}} to the BackRepo
// Phase Two is the update of instance with the field in the database
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CheckoutPhaseTwoInstance(backRepo *BackRepoStruct, {{structname}}DB *{{Structname}}DB) (Error error) {

    {{structname}} := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[{{structname}}DB.ID]

    {{structname}}DB.DecodePointers(backRepo, {{structname}})

    return
}

func ({{structname}}DB *{{Structname}}DB) DecodePointers(backRepo *BackRepoStruct, {{structname}} *models.{{Structname}}) {

    // insertion point for checkout of pointer encoding{{` + string(rune(BackRepoPointerEncodingFieldsCheckout)) + `}}
    return
}

// Commit{{Structname}} allows commit of a single {{structname}} (if already staged)
func (backRepo *BackRepoStruct) Commit{{Structname}}({{structname}} *models.{{Structname}}) {
    backRepo.BackRepo{{Structname}}.CommitPhaseOneInstance({{structname}})
    if id, ok := backRepo.BackRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]; ok {
        backRepo.BackRepo{{Structname}}.CommitPhaseTwoInstance(backRepo, id, {{structname}})
    }
    backRepo.CommitFromBackNb = backRepo.CommitFromBackNb + 1
}

// Checkout{{Structname}} allows checkout of a single {{structname}} (if already staged and with a BackRepo id)
func (backRepo *BackRepoStruct) Checkout{{Structname}}({{structname}} *models.{{Structname}}) {
    // check if the {{structname}} is staged
    if _, ok := backRepo.BackRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]; ok {

        if id, ok := backRepo.BackRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]; ok {
            var {{structname}}DB {{Structname}}DB
            {{structname}}DB.ID = id

            if _, err := backRepo.BackRepo{{Structname}}.db.First(&{{structname}}DB, id); err != nil {
                log.Fatalln("Checkout{{Structname}} : Problem with getting object with id:", id)
            }
            backRepo.BackRepo{{Structname}}.CheckoutPhaseOneInstance(&{{structname}}DB)
            backRepo.BackRepo{{Structname}}.CheckoutPhaseTwoInstance(backRepo, &{{structname}}DB)
        }
    }
}

// CopyBasicFieldsFrom{{Structname}}
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsFrom{{Structname}}({{structname}} *models.{{Structname}}) {
    // insertion point for fields commit{{` + string(rune(BackRepoBasicFieldsCommit)) + `}}
}

// CopyBasicFieldsFrom{{Structname}}_WOP
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsFrom{{Structname}}_WOP({{structname}} *models.{{Structname}}_WOP) {
    // insertion point for fields commit{{` + string(rune(BackRepoBasicFieldsCommit)) + `}}
}

// CopyBasicFieldsFrom{{Structname}}WOP
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsFrom{{Structname}}WOP({{structname}} *{{Structname}}WOP) {
    // insertion point for fields commit{{` + string(rune(BackRepoBasicFieldsCommit)) + `}}
}

// CopyBasicFieldsTo{{Structname}}
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsTo{{Structname}}({{structname}} *models.{{Structname}}) {
    // insertion point for checkout of basic fields (back repo to stage){{` + string(rune(BackRepoBasicFieldsCheckout)) + `}}
}

// CopyBasicFieldsTo{{Structname}}_WOP
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsTo{{Structname}}_WOP({{structname}} *models.{{Structname}}_WOP) {
    // insertion point for checkout of basic fields (back repo to stage){{` + string(rune(BackRepoBasicFieldsCheckout)) + `}}
}

// CopyBasicFieldsTo{{Structname}}WOP
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsTo{{Structname}}WOP({{structname}} *{{Structname}}WOP) {
    {{structname}}.ID = int({{structname}}DB.ID)
    // insertion point for checkout of basic fields (back repo to stage){{` + string(rune(BackRepoBasicFieldsCheckout)) + `}}
}

// Backup generates a json file from a slice of all {{Structname}}DB instances in the backrepo
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) Backup(dirPath string) {

    filename := filepath.Join(dirPath, "{{Structname}}DB.json")

    // organize the map into an array with increasing IDs, in order to have a reproducible
    // backup file
    forBackup := make([]*{{Structname}}DB, 0)
    for _, {{structname}}DB := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB {
        forBackup = append(forBackup, {{structname}}DB)
    }

    sort.Slice(forBackup[:], func(i, j int) bool {
        return forBackup[i].ID < forBackup[j].ID
    })

    file, err := json.MarshalIndent(forBackup, "", " ")

    if err != nil {
        log.Fatal("Cannot json {{Structname}} ", filename, " ", err.Error())
    }

    err = ioutil.WriteFile(filename, file, 0644)
    if err != nil {
        log.Fatal("Cannot write the json {{Structname}} file", err.Error())
    }
}

// BackupXL generates an XL sheet from a slice of all {{Structname}}DB instances in the backrepo
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) BackupXL(file *xlsx.File) {

    // organize the map into an array with increasing IDs, in order to have a reproducible
    // backup file
    forBackup := make([]*{{Structname}}DB, 0)
    for _, {{structname}}DB := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB {
        forBackup = append(forBackup, {{structname}}DB)
    }

    sort.Slice(forBackup[:], func(i, j int) bool {
        return forBackup[i].ID < forBackup[j].ID
    })

    sh, err := file.AddSheet("{{Structname}}")
    if err != nil {
        log.Fatal("Cannot add XL file", err.Error())
    }
    _ = sh

    row := sh.AddRow()
    row.WriteSlice(&{{Structname}}_Fields, -1)
    for _, {{structname}}DB := range forBackup {

        var {{structname}}WOP {{Structname}}WOP
        {{structname}}DB.CopyBasicFieldsTo{{Structname}}WOP(&{{structname}}WOP)

        row := sh.AddRow()
        row.WriteStruct(&{{structname}}WOP, -1)
    }
}

// RestoreXLPhaseOne restores from the "{{Structname}}" sheet all {{Structname}}DB instances
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) RestoreXLPhaseOne(file *xlsx.File) {

    // resets the map
    BackRepo{{Structname}}id_atBckpTime_newID = make(map[uint]uint)

    sh, ok := file.Sheet["{{Structname}}"]
    _ = sh
    if !ok {
        log.Fatal(errors.New("sheet not found"))
    }

    // log.Println("Max row is", sh.MaxRow)
    err := sh.ForEachRow(backRepo{{Structname}}.rowVisitor{{Structname}})
    if err != nil {
        log.Fatal("Err=", err)
    }
}

func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) rowVisitor{{Structname}}(row *xlsx.Row) error {

    log.Printf("row line %d\n", row.GetCoordinate())
    log.Println(row)

    // skip first line
    if row.GetCoordinate() > 0 {
        var {{structname}}WOP {{Structname}}WOP
        row.ReadStruct(&{{structname}}WOP)

        // add the unmarshalled struct to the stage
        {{structname}}DB := new({{Structname}}DB)
        {{structname}}DB.CopyBasicFieldsFrom{{Structname}}WOP(&{{structname}}WOP)

        {{structname}}DB_ID_atBackupTime := {{structname}}DB.ID
        {{structname}}DB.ID = 0
        _, err := backRepo{{Structname}}.db.Create({{structname}}DB)
        if err != nil {
            log.Fatal(err)
        }
        backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[{{structname}}DB.ID] = {{structname}}DB
        BackRepo{{Structname}}id_atBckpTime_newID[{{structname}}DB_ID_atBackupTime] = {{structname}}DB.ID
    }
    return nil
}

// RestorePhaseOne reads the file "{{Structname}}DB.json" in dirPath that stores an array
// of {{Structname}}DB and stores it in the database
// the map BackRepo{{Structname}}id_atBckpTime_newID is updated accordingly
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) RestorePhaseOne(dirPath string) {

    // resets the map
    BackRepo{{Structname}}id_atBckpTime_newID = make(map[uint]uint)

    filename := filepath.Join(dirPath, "{{Structname}}DB.json")
    jsonFile, err := os.Open(filename)
    // if os.Open returns an error then handle it
    if err != nil {
        log.Fatal("Cannot restore/open the json {{Structname}} file", filename, " ", err.Error())
    }

    // read our opened jsonFile as a byte array.
    byteValue, _ := ioutil.ReadAll(jsonFile)

    var forRestore []*{{Structname}}DB

    err = json.Unmarshal(byteValue, &forRestore)

    // fill up Map_{{Structname}}DBID_{{Structname}}DB
    for _, {{structname}}DB := range forRestore {

        {{structname}}DB_ID_atBackupTime := {{structname}}DB.ID
        {{structname}}DB.ID = 0
        _, err := backRepo{{Structname}}.db.Create({{structname}}DB)
        if err != nil {
            log.Fatal(err)
        }
        backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[{{structname}}DB.ID] = {{structname}}DB
        BackRepo{{Structname}}id_atBckpTime_newID[{{structname}}DB_ID_atBackupTime] = {{structname}}DB.ID
    }

    if err != nil {
        log.Fatal("Cannot restore/unmarshall json {{Structname}} file", err.Error())
    }
}

// RestorePhaseTwo uses all maps BackRepo<{{Structname}}>id_atBckpTime_newID
// to compute the new indexes
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) RestorePhaseTwo() {

    for _, {{structname}}DB := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB {

        // next line of code is to avert unused variable compilation error
        _ = {{structname}}DB

        // insertion point for reindexing pointers encoding{{` + string(rune(BackRepoPointerEncodingFieldsReindexing)) + `}}

        // update database with new index encoding
        db, _ := backRepo{{Structname}}.db.Model({{structname}}DB)
        _, err := db.Updates(*{{structname}}DB)
        if err != nil {
            log.Fatal(err)
        }
    }
}

// BackRepo{{Structname}}.ResetReversePointers commits all staged instances of {{Structname}} to the BackRepo
// Phase Two is the update of instance with the field in the database
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) ResetReversePointers(backRepo *BackRepoStruct) (Error error) {

    for idx, {{structname}} := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr {
        backRepo{{Structname}}.ResetReversePointersInstance(backRepo, idx, {{structname}})
    }

    return
}

func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) ResetReversePointersInstance(backRepo *BackRepoStruct, idx uint, {{structname}} *models.{{Structname}}) (Error error) {

    // fetch matching {{structname}}DB
    if {{structname}}DB, ok := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[idx]; ok {
        _ = {{structname}}DB // to avoid unused variable error if there are no reverse pointers to reset

        // insertion point for reverse pointers reset{{` + string(rune(BackRepoPointerReversePointersReseting)) + `}}
        // end of insertion point for reverse pointers reset
    }

    return
}

// this field is used during the restoration process.
// it stores the ID at the backup time and is used for renumbering
var BackRepo{{Structname}}id_atBckpTime_newID map[uint]uint
`
View Source
const BackRepoTemplateCode = `// do not modify, generated file
package orm

import (
    "bufio"
    "bytes"
    "context"
    "io/ioutil"
    "log"
    "os"
    "path/filepath"
    "sync"

    "{{PkgPathRoot}}/db"
    "{{PkgPathRoot}}/models"
` + gormFirstLineToBeRemoved + `
    "{{PkgPathRoot}}/orm/dbgorm"
` + gormLastLineToBeRemoved + `

    "github.com/tealeg/xlsx/v3"
)

// BackRepoStruct supports callback functions
type BackRepoStruct struct {
    // insertion point for per struct back repo declarations{{` + string(rune(BackRepoPerStructDeclarations)) + `}}

    CommitFromBackNb uint // records commit increments when performed by the back
    PushFromFrontNb  uint // records commit increments when performed by the front

    stage *models.StageStruct

    // the back repo can broadcast the CommitFromBackNb to all interested subscribers
    rwMutex sync.RWMutex

    subscribersRwMutex sync.RWMutex
    subscribers        []chan int
}

func NewBackRepo(stage *models.StageStruct, filename string) (backRepo *BackRepoStruct) {

    var db db.DBInterface
` + liteFirstLineToBeRemoved + `
    db = NewDBLite()
` + liteLastLineToBeRemoved + `
` + gormFirstLineToBeRemoved + `
    db = dbgorm.NewDBWrapper(filename, "{{PkgPathRootWithoutSlashes}}",{{` + string(rune(BackRepoPerStructRefToStructDB)) + `}}
    )
` + gormLastLineToBeRemoved + `

    backRepo = new(BackRepoStruct)

    // insertion point for per struct back repo declarations{{` + string(rune(BackRepoPerStructInits)) + `}}

    stage.BackRepo = backRepo
    backRepo.stage = stage

    return
}

func (backRepo *BackRepoStruct) GetStage() (stage *models.StageStruct) {
    stage = backRepo.stage
    return
}

func (backRepo *BackRepoStruct) GetLastCommitFromBackNb() uint {
    return backRepo.CommitFromBackNb
}

func (backRepo *BackRepoStruct) GetLastPushFromFrontNb() uint {
    return backRepo.PushFromFrontNb
}

func (backRepo *BackRepoStruct) IncrementCommitFromBackNb() uint {
    if backRepo.stage.OnInitCommitCallback != nil {
        backRepo.stage.OnInitCommitCallback.BeforeCommit(backRepo.stage)
    }
    if backRepo.stage.OnInitCommitFromBackCallback != nil {
        backRepo.stage.OnInitCommitFromBackCallback.BeforeCommit(backRepo.stage)
    }
    backRepo.CommitFromBackNb = backRepo.CommitFromBackNb + 1

    backRepo.broadcastNbCommitToBack()

    return backRepo.CommitFromBackNb
}

func (backRepo *BackRepoStruct) IncrementPushFromFrontNb() uint {
    if backRepo.stage.OnInitCommitCallback != nil {
        backRepo.stage.OnInitCommitCallback.BeforeCommit(backRepo.stage)
    }
    if backRepo.stage.OnInitCommitFromFrontCallback != nil {
        backRepo.stage.OnInitCommitFromFrontCallback.BeforeCommit(backRepo.stage)
    }
    backRepo.PushFromFrontNb = backRepo.PushFromFrontNb + 1

    return backRepo.CommitFromBackNb
}

// Commit the BackRepoStruct inner variables and link to the database
func (backRepo *BackRepoStruct) Commit(stage *models.StageStruct) {

    // forbid read of back repo during commit
    backRepo.rwMutex.Lock()
    defer backRepo.rwMutex.Unlock()

    // insertion point for per struct back repo phase one commit{{` + string(rune(BackRepoPerStructPhaseOneCommits)) + `}}

    // insertion point for per struct back repo phase two commit{{` + string(rune(BackRepoPerStructPhaseTwoCommits)) + `}}

    backRepo.IncrementCommitFromBackNb()
}

// Checkout the database into the stage
func (backRepo *BackRepoStruct) Checkout(stage *models.StageStruct) {
    // insertion point for per struct back repo phase one checkout{{` + string(rune(BackRepoPerStructPhaseOneCheckouts)) + `}}

    // insertion point for per struct back repo phase two checkout{{` + string(rune(BackRepoPerStructPhaseTwoCheckouts)) + `}}
}

// Backup the BackRepoStruct
func (backRepo *BackRepoStruct) Backup(stage *models.StageStruct, dirPath string) {

    os.MkdirAll(dirPath, os.ModePerm)

    // insertion point for per struct backup{{` + string(rune(BackRepoBackup)) + `}}
}

// Backup in XL the BackRepoStruct
func (backRepo *BackRepoStruct) BackupXL(stage *models.StageStruct, dirPath string) {

    os.MkdirAll(dirPath, os.ModePerm)

    // open an existing file
    file := xlsx.NewFile()

    // insertion point for per struct backup{{` + string(rune(BackRepoBackupXL)) + `}}

    var b bytes.Buffer
    writer := bufio.NewWriter(&b)
    file.Write(writer)
    theBytes := b.Bytes()
    filename := filepath.Join(dirPath, "bckp.xlsx")
    err := ioutil.WriteFile(filename, theBytes, 0644)
    if err != nil {
        log.Panic("Cannot write the XL file", err.Error())
    }
}

// Restore the database into the back repo
func (backRepo *BackRepoStruct) Restore(stage *models.StageStruct, dirPath string) {

    backRepo.stage.Commit()
    backRepo.stage.Reset()
    backRepo.stage.Checkout()

    //
    // restoration first phase (create DB instance with new IDs)
    //

    // insertion point for per struct backup{{` + string(rune(BackRepoRestorePhaseOne)) + `}}

    //
    // restoration second phase (reindex pointers with the new ID)
    //

    // insertion point for per struct backup{{` + string(rune(BackRepoRestorePhaseTwo)) + `}}

    backRepo.stage.Checkout()
}

// RestoreXL restores the database into the back repo from an XL file
func (backRepo *BackRepoStruct) RestoreXL(stage *models.StageStruct, dirPath string) {

    // clean the stage
    backRepo.stage.Reset()

    // commit the cleaned stage
    backRepo.stage.Commit()

    // open an existing file
    filename := filepath.Join(dirPath, "bckp.xlsx")
    file, err := xlsx.OpenFile(filename)
    _ = file
    if err != nil {
        log.Panic("Cannot read the XL file", err.Error())
    }

    //
    // restoration first phase (create DB instance with new IDs)
    //

    // insertion point for per struct backup{{` + string(rune(BackRepoRestoreXLPhaseOne)) + `}}

    // commit the restored stage
    backRepo.stage.Commit()
}

func (backRepoStruct *BackRepoStruct) SubscribeToCommitNb(ctx context.Context) <-chan int {
    ch := make(chan int)

    backRepoStruct.subscribersRwMutex.Lock()
    backRepoStruct.subscribers = append(backRepoStruct.subscribers, ch)
    backRepoStruct.subscribersRwMutex.Unlock()

    // Goroutine to remove subscriber when context is done
    go func() {
        <-ctx.Done()
        backRepoStruct.unsubscribe(ch)
    }()
    return ch
}

// unsubscribe removes a subscriber's channel from the subscribers slice.
func (backRepoStruct *BackRepoStruct) unsubscribe(ch chan int) {
    backRepoStruct.subscribersRwMutex.Lock()
    defer backRepoStruct.subscribersRwMutex.Unlock()
    for i, subscriber := range backRepoStruct.subscribers {
        if subscriber == ch {
            backRepoStruct.subscribers =
                append(backRepoStruct.subscribers[:i], backRepoStruct.subscribers[i+1:]...)
            close(ch) // Close the channel to signal completion
            break
        }
    }
}

func (backRepoStruct *BackRepoStruct) broadcastNbCommitToBack() {
    backRepoStruct.subscribersRwMutex.RLock()
    subscribers := make([]chan int, len(backRepoStruct.subscribers))
    copy(subscribers, backRepoStruct.subscribers)
    backRepoStruct.subscribersRwMutex.RUnlock()

    for _, ch := range subscribers {
        select {
        case ch <- int(backRepoStruct.CommitFromBackNb):
            // Successfully sent commit from back
        default:
            // Subscriber is not ready to receive; skip to avoid blocking
        }
    }
}
`
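The subscription machinery at the end of this template lets a client react to commits performed by the back. A minimal sketch of how the generated SubscribeToCommitNb might be consumed; it is written as if it lived alongside the generated orm code, and the function and variable names are illustrative:

    // listenForCommits logs each commit number broadcast by the back repo,
    // until ctx is cancelled (cancellation unsubscribes and closes the channel,
    // which ends the range loop).
    func listenForCommits(ctx context.Context, backRepo *BackRepoStruct) {
        for nb := range backRepo.SubscribeToCommitNb(ctx) {
            log.Println("commit from back, number:", nb)
        }
    }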
View Source
const DbTmpl = `// generated code - do not edit
package orm

import (
    "errors"
    "fmt"
    "strconv"
    "sync"

    "{{PkgPathRoot}}/db"
)

// Ensure DBLite implements DBInterface
var _ db.DBInterface = &DBLite{}

// DBLite is an in-memory database implementation of DBInterface
type DBLite struct {
    // Mutex to protect shared resources
    mu sync.RWMutex

    // insertion point definitions{{` + string(rune(DBliteMapFieldDefinition)) + `}}
}

// NewDBLite creates a new instance of DBLite
func NewDBLite() *DBLite {
    return &DBLite{
        // insertion point maps init{{` + string(rune(DBliteMapFieldInit)) + `}}
    }
}

// Create inserts a new record into the database
func (db *DBLite) Create(instanceDB any) (db.DBInterface, error) {
    if instanceDB == nil {
        return nil, errors.New("{{PkgPathRoot}}, instanceDB cannot be nil")
    }

    db.mu.Lock()
    defer db.mu.Unlock()

    switch v := instanceDB.(type) {
    // insertion point create{{` + string(rune(DBliteMapFieldCreate)) + `}}
    default:
        return nil, errors.New("{{PkgPathRoot}}, unsupported type in Create")
    }
    return db, nil
}

// Unscoped sets the unscoped flag for soft-deletes (not used in this implementation)
func (db *DBLite) Unscoped() (db.DBInterface, error) {
    return db, nil
}

// Model is a placeholder in this implementation
func (db *DBLite) Model(instanceDB any) (db.DBInterface, error) {
    // Not implemented as types are handled directly
    return db, nil
}

// Delete removes a record from the database
func (db *DBLite) Delete(instanceDB any) (db.DBInterface, error) {
    if instanceDB == nil {
        return nil, errors.New("{{PkgPathRoot}}, instanceDB cannot be nil")
    }

    db.mu.Lock()
    defer db.mu.Unlock()

    switch v := instanceDB.(type) {
    // insertion point delete{{` + string(rune(DBliteMapFieldDelete)) + `}}
    default:
        return nil, errors.New("{{PkgPathRoot}}, unsupported type in Delete")
    }
    return db, nil
}

// Save updates or inserts a record into the database
func (db *DBLite) Save(instanceDB any) (db.DBInterface, error) {
    if instanceDB == nil {
        return nil, errors.New("{{PkgPathRoot}}, instanceDB cannot be nil")
    }

    db.mu.Lock()
    defer db.mu.Unlock()

    switch v := instanceDB.(type) {
    // insertion point save{{` + string(rune(DBliteMapFieldSave)) + `}}
    default:
        return nil, errors.New("{{PkgPathRoot}}, Save: unsupported type")
    }
}

// Updates modifies an existing record in the database
func (db *DBLite) Updates(instanceDB any) (db.DBInterface, error) {
    if instanceDB == nil {
        return nil, errors.New("{{PkgPathRoot}}, instanceDB cannot be nil")
    }

    db.mu.Lock()
    defer db.mu.Unlock()

    switch v := instanceDB.(type) {
    // insertion point update{{` + string(rune(DBliteMapFieldUpdate)) + `}}
    default:
        return nil, errors.New("{{PkgPathRoot}}, unsupported type in Updates")
    }
    return db, nil
}

// Find retrieves all records of a type from the database
func (db *DBLite) Find(instanceDBs any) (db.DBInterface, error) {
    db.mu.RLock()
    defer db.mu.RUnlock()

    switch ptr := instanceDBs.(type) {
    // insertion point find{{` + string(rune(DBliteMapFieldFind)) + `}}
    default:
        return nil, errors.New("{{PkgPathRoot}}, Find: unsupported type")
    }
}

// First retrieves the first record of a type from the database
func (db *DBLite) First(instanceDB any, conds ...any) (db.DBInterface, error) {
    if len(conds) != 1 {
        return nil, errors.New("{{PkgPathRoot}}, Do not process when conds is not a single parameter")
    }

    var i uint64
    var err error

    switch cond := conds[0].(type) {
    case string:
        i, err = strconv.ParseUint(cond, 10, 32) // Base 10, 32-bit unsigned int
        if err != nil {
            return nil, errors.New("{{PkgPathRoot}}, conds[0] is not a string number")
        }
    case uint64:
        i = cond
    case uint:
        i = uint64(cond)
    default:
        return nil, errors.New("{{PkgPathRoot}}, conds[0] is not a string, uint or uint64")
    }

    db.mu.RLock()
    defer db.mu.RUnlock()

    switch instanceDB.(type) {
    // insertion point first{{` + string(rune(DBliteMapFieldFirst)) + `}}
    default:
        return nil, errors.New("{{PkgPathRoot}}, Unknown type")
    }
    return db, nil
}
`
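Since the generated DBLite keys every record map by ID and implements db.DBInterface, the generated API can be exercised directly. A sketch, assuming the data model declares a struct named Astruct (hypothetical) so that the generator emitted an AstructDB type and the corresponding switch cases:

    func demoDBLite() {
        dbLite := NewDBLite()

        // Create assigns the next free ID to the record
        astructDB := &AstructDB{}
        if _, err := dbLite.Create(astructDB); err != nil {
            log.Fatal(err)
        }

        // Find fills the slice with a copy of every stored record
        var all []AstructDB
        if _, err := dbLite.Find(&all); err != nil {
            log.Fatal(err)
        }

        // First accepts the ID as a string, uint or uint64
        fetched := &AstructDB{}
        if _, err := dbLite.First(fetched, astructDB.ID); err != nil {
            log.Fatal(err)
        }
    }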
View Source
const GetInstanceDBFromInstanceTemplateCode = `// generated code - do not edit
package orm

import (
    "{{PkgPathRoot}}/models"
)

type GongstructDB interface {
}

func GetInstanceDBFromInstance[T models.Gongstruct, T2 GongstructDB](
    stage *models.StageStruct,
    backRepo *BackRepoStruct,
    instance *T) (ret *T2) {

    switch concreteInstance := any(instance).(type) {
    // insertion point for per struct backup{{` + string(rune(GetInstanceDBFromInstanceSwitchCaseGet)) + `}}
    default:
        _ = concreteInstance
    }
    return
}

func GetID[T models.Gongstruct](
    stage *models.StageStruct,
    backRepo *BackRepoStruct,
    instance *T) (id int) {

    switch inst := any(instance).(type) {
    // insertion point for per struct backup{{` + string(rune(GetInstanceDBFromInstanceSwitchCaseGetID)) + `}}
    default:
        _ = inst
    }
    return
}

func GetIDPointer[T models.PointerToGongstruct](
    stage *models.StageStruct,
    backRepo *BackRepoStruct,
    instance T) (id int) {

    switch inst := any(instance).(type) {
    // insertion point for per struct backup{{` + string(rune(GetInstanceDBFromInstanceSwitchCaseGetID)) + `}}
    default:
        _ = inst
    }
    return
}
`
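The generated GetInstanceDBFromInstance resolves the ORM counterpart of a staged instance through the generated type switch. A hedged usage sketch, again with a hypothetical Astruct model struct:

    func lookupORM(stage *models.StageStruct, backRepo *BackRepoStruct, astruct *models.Astruct) {
        // the type parameters select the generated case in the switch
        astructDB := GetInstanceDBFromInstance[models.Astruct, AstructDB](stage, backRepo, astruct)

        // GetID infers its type parameter and returns the same GORM ID
        log.Println("GORM ID:", astructDB.ID, "==", GetID(stage, backRepo, astruct))
    }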
View Source
const GetReverseFieldOwnerName = `// generated code - do not edit
package orm

import (
    "{{PkgPathRoot}}/models"
)

func GetReverseFieldOwnerName[T models.Gongstruct](
    stage *models.StageStruct,
    backRepo *BackRepoStruct,
    instance *T,
    reverseField *models.ReverseField) (res string) {

    res = ""

    switch inst := any(instance).(type) {
    // insertion point{{` + string(rune(GetReverseFieldOwnerNameSwitch)) + `}}
    default:
        _ = inst
    }
    return
}

func GetReverseFieldOwner[T models.Gongstruct](
    stage *models.StageStruct,
    backRepo *BackRepoStruct,
    instance *T,
    reverseField *models.ReverseField) (res any) {

    res = nil

    switch inst := any(instance).(type) {
    // insertion point{{` + string(rune(GetReverseFieldOwnerSwitch)) + `}}
    default:
        _ = inst
    }
    return res
}
`
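The generated GetReverseFieldOwnerName walks a double switch (owning struct name, then field name) to find which instance owns the given instance through a slice-of-pointers field. An illustrative call; the Astruct/Bstruct model and the ReverseField values are assumptions:

    // ownerOf returns the Name of the Astruct (hypothetical) that owns bstruct
    // through its Bstructs field, or "" if there is no owner.
    func ownerOf(stage *models.StageStruct, backRepo *BackRepoStruct, bstruct *models.Bstruct) string {
        return GetReverseFieldOwnerName(stage, backRepo, bstruct,
            &models.ReverseField{GongstructName: "Astruct", Fieldname: "Bstructs"})
    }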
View Source
const Gorm = " gorm"
View Source
const IntSliceTemplateCode = `` /* 530-byte string literal not displayed */
View Source
const Lite = " lite"
Variables ¶
View Source
var BackRepoDataSubTemplate map[string]string = map[string]string{

    string(rune(BackRepoDataSlice)): `
    {{Structname}}APIs []*{{Structname}}API`,

    string(rune(BackRepoDataSliceCopies)): `
    for _, {{structname}}DB := range backRepo.BackRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB {
        var {{structname}}API {{Structname}}API
        {{structname}}API.ID = {{structname}}DB.ID
        {{structname}}API.{{Structname}}PointersEncoding = {{structname}}DB.{{Structname}}PointersEncoding
        {{structname}}DB.CopyBasicFieldsTo{{Structname}}_WOP(&{{structname}}API.{{Structname}}_WOP)
        backRepoData.{{Structname}}APIs = append(backRepoData.{{Structname}}APIs, &{{structname}}API)
    }
`,
}
View Source
var BackRepoFieldSubTemplateCode map[BackRepoPerStructSubTemplate]string = map[BackRepoPerStructSubTemplate]string{

    BackRepoDeclarationBasicField: `{{DeclarationPrefixPrologue}}
{{DeclarationPrefixPadding}}// Declaration for basic field {{structname}}DB.{{FieldName}}
{{DeclarationPrefixPadding}}{{FieldName}}_Data sql.{{SqlNullType}}{{DeclarationPrefixEpilogue}}`,

    BackRepoDeclarationTimeField: `{{DeclarationPrefixPrologue}}
{{DeclarationPrefixPadding}}// Declaration for basic field {{structname}}DB.{{FieldName}}
{{DeclarationPrefixPadding}}{{FieldName}}_Data sql.NullTime{{DeclarationPrefixEpilogue}}`,

    BackRepoDeclarationBasicBooleanField: `{{DeclarationPrefixPrologue}}
{{DeclarationPrefixPadding}}// Declaration for basic field {{structname}}DB.{{FieldName}}
{{DeclarationPrefixPadding}}// provide the sql storage for the boolean
{{DeclarationPrefixPadding}}{{FieldName}}_Data sql.NullBool{{DeclarationPrefixEpilogue}}`,

    BackRepoPointerEncoding: `{{DeclarationPrefixPrologue}}
{{DeclarationPrefixPadding}}// field {{FieldName}} is a pointer to another Struct (optional or 0..1)
{{DeclarationPrefixPadding}}// This field is generated into another field to enable AS ONE association
{{DeclarationPrefixPadding}}{{FieldName}}ID sql.NullInt64{{DeclarationPrefixEpilogue}}`,

    BackRepoSliceOfPointersEncoding: `{{DeclarationPrefixPrologue}}
{{DeclarationPrefixPadding}}// field {{FieldName}} is a slice of pointers to another Struct (optional or 0..1)
{{DeclarationPrefixPadding}}{{FieldName}} IntSlice` + " `" + `gorm:"type:TEXT"` + "`{{DeclarationPrefixEpilogue}}",

    BackRepoCommitBasicField: `
        {{structname}}DB.{{FieldName}}_Data.{{SqlNullType}} = {{structname}}.{{FieldName}}
        {{structname}}DB.{{FieldName}}_Data.Valid = true`,

    BackRepoCommitBasicFieldEnum: `
        {{structname}}DB.{{FieldName}}_Data.String = {{structname}}.{{FieldName}}.ToString()
        {{structname}}DB.{{FieldName}}_Data.Valid = true`,

    BackRepoCommitBasicFieldInt: `
        {{structname}}DB.{{FieldName}}_Data.Int64 = int64({{structname}}.{{FieldName}})
        {{structname}}DB.{{FieldName}}_Data.Valid = true`,

    BackRepoCommitTimeField: `
        {{structname}}DB.{{FieldName}}_Data.Time = {{structname}}.{{FieldName}}
        {{structname}}DB.{{FieldName}}_Data.Valid = true`,

    BackRepoCommitBasicBooleanField: `
        {{structname}}DB.{{FieldName}}_Data.Bool = {{structname}}.{{FieldName}}
        {{structname}}DB.{{FieldName}}_Data.Valid = true`,

    BackRepoCommitPointerToStructField: `
        // commit pointer value {{structname}}.{{FieldName}} translates to updating the {{structname}}.{{FieldName}}ID
        {{structname}}DB.{{FieldNameForAssignment}}ID.Valid = true // allow for a 0 value (nil association)
        if {{structname}}.{{FieldName}} != nil {
            if {{FieldNameForDeclaration}}Id, ok := backRepo.BackRepo{{AssociationStructName}}.Map_{{AssociationStructName}}Ptr_{{AssociationStructName}}DBID[{{structname}}.{{FieldName}}]; ok {
                {{structname}}DB.{{FieldNameForAssignment}}ID.Int64 = int64({{FieldNameForDeclaration}}Id)
                {{structname}}DB.{{FieldNameForAssignment}}ID.Valid = true
            }
        } else {
            {{structname}}DB.{{FieldNameForAssignment}}ID.Int64 = 0
            {{structname}}DB.{{FieldNameForAssignment}}ID.Valid = true
        }
`,

    BackRepoCommitSliceOfPointerToStructField: `
        // 1. reset
        {{structname}}DB.{{Structname}}PointersEncoding.{{FieldName}} = make([]int, 0)
        // 2. encode
        for _, {{associationStructName}}AssocEnd := range {{structname}}.{{FieldName}} {
            {{associationStructName}}AssocEnd_DB :=
                backRepo.BackRepo{{AssociationStructName}}.Get{{AssociationStructName}}DBFrom{{AssociationStructName}}Ptr({{associationStructName}}AssocEnd)

            // the stage might be inconsistent, meaning that the {{associationStructName}}AssocEnd_DB might
            // be missing from the stage. In this case, the commit operation is robust
            // An alternative would be to crash here to reveal the missing element.
            if {{associationStructName}}AssocEnd_DB == nil {
                continue
            }

            {{structname}}DB.{{Structname}}PointersEncoding.{{FieldName}} =
                append({{structname}}DB.{{Structname}}PointersEncoding.{{FieldName}}, int({{associationStructName}}AssocEnd_DB.ID))
        }
`,

    BackRepoCheckoutBasicField: `
    {{structname}}.{{FieldName}} = {{structname}}DB.{{FieldName}}_Data.{{SqlNullType}}`,

    BackRepoCheckoutTimeField: `
    {{structname}}.{{FieldName}} = {{structname}}DB.{{FieldName}}_Data.Time`,

    BackRepoCheckoutBasicFieldEnum: `
    {{structname}}.{{FieldName}}.FromString({{structname}}DB.{{FieldName}}_Data.String)`,

    BackRepoCheckoutBasicFieldInt: `
    {{structname}}.{{FieldName}} = {{FieldType}}({{structname}}DB.{{FieldName}}_Data.Int64)`,

    BackRepoCheckoutBasicFieldIntEnum: `
    {{structname}}.{{FieldName}} = models.{{FieldType}}({{structname}}DB.{{FieldName}}_Data.Int64)`,

    BackRepoCheckoutBasicFieldBoolean: `
    {{structname}}.{{FieldName}} = {{structname}}DB.{{FieldName}}_Data.Bool`,

    BackRepoCheckoutPointerToStructStageField: `
    // {{FieldName}} field
    {
        id := {{structname}}DB.{{FieldNameForAssignment}}ID.Int64
        if id != 0 {
            tmp, ok := backRepo.BackRepo{{AssociationStructName}}.Map_{{AssociationStructName}}DBID_{{AssociationStructName}}Ptr[uint(id)]

            if !ok {
                log.Fatalln("DecodePointers: {{structname}}.{{FieldName}}, unknown pointer id", id)
            }

            // updates only if field has changed
            if {{structname}}.{{FieldName}} == nil || {{structname}}.{{FieldName}} != tmp {
                {{structname}}.{{FieldName}} = tmp
            }
        } else {
            {{structname}}.{{FieldName}} = nil
        }
    }
`,

    BackRepoReindexingPointerToStruct: `
    // reindexing {{FieldName}} field
    if {{structname}}DB.{{FieldNameForAssignment}}ID.Int64 != 0 {
        {{structname}}DB.{{FieldNameForAssignment}}ID.Int64 =
            int64(BackRepo{{AssociationStructName}}id_atBckpTime_newID[uint({{structname}}DB.{{FieldNameForAssignment}}ID.Int64)])
        {{structname}}DB.{{FieldNameForAssignment}}ID.Valid = true
    }
`,

    BackRepoCheckoutSliceOfPointerToStructStageField: `
    // This loop redeems {{structname}}.{{FieldName}} in the stage from the encoding in the back repo
    // It parses all {{AssociationStructName}}DB in the back repo and if the reverse pointer encoding matches the back repo ID
    // it appends the stage instance
    // 1. reset the slice
    {{structname}}.{{FieldName}} = {{structname}}.{{FieldName}}[:0]
    for _, _{{AssociationStructName}}id := range {{structname}}DB.{{Structname}}PointersEncoding.{{FieldName}} {
        {{structname}}.{{FieldName}} = append({{structname}}.{{FieldName}},
            backRepo.BackRepo{{AssociationStructName}}.Map_{{AssociationStructName}}DBID_{{AssociationStructName}}Ptr[uint(_{{AssociationStructName}}id)])
    }
`,
}
View Source
var BackRepoSubTemplate map[string]string = map[string]string{

    string(rune(BackRepoPerStructDeclarations)): `
    BackRepo{{Structname}} BackRepo{{Structname}}Struct
`,

    string(rune(BackRepoPerStructInits)): `
    backRepo.BackRepo{{Structname}} = BackRepo{{Structname}}Struct{
        Map_{{Structname}}DBID_{{Structname}}Ptr: make(map[uint]*models.{{Structname}}, 0),
        Map_{{Structname}}DBID_{{Structname}}DB:  make(map[uint]*{{Structname}}DB, 0),
        Map_{{Structname}}Ptr_{{Structname}}DBID: make(map[*models.{{Structname}}]uint, 0),

        db:    db,
        stage: stage,
    }`,

    string(rune(BackRepoPerStructRefToStructDB)): `
        &{{Structname}}DB{},`,

    string(rune(BackRepoPerStructPhaseOneCommits)): `
    backRepo.BackRepo{{Structname}}.CommitPhaseOne(stage)`,

    string(rune(BackRepoPerStructPhaseTwoCommits)): `
    backRepo.BackRepo{{Structname}}.CommitPhaseTwo(backRepo)`,

    string(rune(BackRepoPerStructPhaseOneCheckouts)): `
    backRepo.BackRepo{{Structname}}.CheckoutPhaseOne()`,

    string(rune(BackRepoPerStructPhaseTwoCheckouts)): `
    backRepo.BackRepo{{Structname}}.CheckoutPhaseTwo(backRepo)`,

    string(rune(BackRepoInitAndCommit)): `
    map_{{Structname}}DBID_{{Structname}}DB = nil
    map_{{Structname}}Ptr_{{Structname}}DBID = nil
    map_{{Structname}}DBID_{{Structname}}Ptr = nil

    if err := BackRepo{{Structname}}Init(
        CreateMode,
        db); err != nil {
        return err
    }
`,

    string(rune(BackRepoInitAndCheckout)): `
    map_{{Structname}}DBID_{{Structname}}DB = nil
    map_{{Structname}}Ptr_{{Structname}}DBID = nil
    map_{{Structname}}DBID_{{Structname}}Ptr = nil

    if err := BackRepo{{Structname}}Init(
        CreateMode,
        db); err != nil {
        err := errors.New("AllORMToModels, CreateMode Translation of {{Structname}} failed")
        return err
    }
`,

    string(rune(BackRepoCheckout)): `
    if err := BackRepo{{Structname}}Init(
        UpdateMode,
        db); err != nil {
        err := errors.New("AllORMToModels, UpdateMode Translation of {{Structname}} failed")
        return err
    }
`,

    string(rune(BackRepoCommit)): `
    if err := BackRepo{{Structname}}Init(
        UpdateMode,
        db); err != nil {
        return err
    }
`,

    string(rune(BackRepoBackup)): `
    backRepo.BackRepo{{Structname}}.Backup(dirPath)`,

    string(rune(BackRepoBackupXL)): `
    backRepo.BackRepo{{Structname}}.BackupXL(file)`,

    string(rune(BackRepoRestorePhaseOne)): `
    backRepo.BackRepo{{Structname}}.RestorePhaseOne(dirPath)`,

    string(rune(BackRepoRestoreXLPhaseOne)): `
    backRepo.BackRepo{{Structname}}.RestoreXLPhaseOne(file)`,

    string(rune(BackRepoRestorePhaseTwo)): `
    backRepo.BackRepo{{Structname}}.RestorePhaseTwo()`,
}
View Source
var DBliteSubTemplates map[string]string = map[string]string{

    string(rune(DBliteMapFieldDefinition)): `
    {{structname}}DBs map[uint]*{{Structname}}DB

    nextID{{Structname}}DB uint`,

    string(rune(DBliteMapFieldInit)): `
        {{structname}}DBs: make(map[uint]*{{Structname}}DB),`,

    string(rune(DBliteMapFieldCreate)): `
    case *{{Structname}}DB:
        db.nextID{{Structname}}DB++
        v.ID = db.nextID{{Structname}}DB
        db.{{structname}}DBs[v.ID] = v`,

    string(rune(DBliteMapFieldDelete)): `
    case *{{Structname}}DB:
        delete(db.{{structname}}DBs, v.ID)`,

    string(rune(DBliteMapFieldSave)): `
    case *{{Structname}}DB:
        db.{{structname}}DBs[v.ID] = v
        return db, nil`,

    string(rune(DBliteMapFieldUpdate)): `
    case *{{Structname}}DB:
        if existing, ok := db.{{structname}}DBs[v.ID]; ok {
            *existing = *v
        } else {
            return nil, errors.New("db {{Structname}} {{PkgPathRoot}}, record not found")
        }`,

    string(rune(DBliteMapFieldFind)): `
    case *[]{{Structname}}DB:
        *ptr = make([]{{Structname}}DB, 0, len(db.{{structname}}DBs))
        for _, v := range db.{{structname}}DBs {
            *ptr = append(*ptr, *v)
        }
        return db, nil`,

    string(rune(DBliteMapFieldFirst)): `
    case *{{Structname}}DB:
        tmp, ok := db.{{structname}}DBs[uint(i)]

        if !ok {
            return nil, errors.New(fmt.Sprintf("db.First {{Structname}} Unknown entry %d", i))
        }

        {{structname}}DB, _ := instanceDB.(*{{Structname}}DB)
        *{{structname}}DB = *tmp
`,
}
View Source
var GetInstanceDBFromInstanceSubTemplate map[string]string = map[string]string{

    string(rune(GetInstanceDBFromInstanceSwitchCaseGetID)): `
    case *models.{{Structname}}:
        tmp := GetInstanceDBFromInstance[models.{{Structname}}, {{Structname}}DB](
            stage, backRepo, inst,
        )
        id = int(tmp.ID)`,

    string(rune(GetInstanceDBFromInstanceSwitchCaseGet)): `
    case *models.{{Structname}}:
        {{structname}}Instance := any(concreteInstance).(*models.{{Structname}})
        ret2 := backRepo.BackRepo{{Structname}}.Get{{Structname}}DBFrom{{Structname}}Ptr({{structname}}Instance)
        ret = any(ret2).(*T2)`,
}
View Source
var GetReverseFieldOwnerNameSubSubTemplateCode map[GetReverseFieldOwnerNameSubTemplateId]string = map[GetReverseFieldOwnerNameSubTemplateId]string{

    GetReverseFieldOwnerNameMasterSwitchCodeStart: `
        case "{{AssocStructName}}":
            switch reverseField.Fieldname {`,

    GetReverseFieldOwnerNameSwitchCode: `
            case "{{FieldName}}":
                if _{{assocStructName}}, ok := stage.{{AssocStructName}}_{{FieldName}}_reverseMap[inst]; ok {
                    res = _{{assocStructName}}.Name
                }`,

    GetReverseFieldOwnerSwitchCode: `
            case "{{FieldName}}":
                res = stage.{{AssocStructName}}_{{FieldName}}_reverseMap[inst]`,

    GetReverseFieldOwnerNameMasterSwitchCodeEnd: `
            }`,
}
View Source
var GetReverseFieldOwnerNameSubTemplateCode map[GetReverseFieldOwnerNameId]string = map[GetReverseFieldOwnerNameId]string{

    GetReverseFieldOwnerNameSwitch: `
    case *models.{{Structname}}:
        switch reverseField.GongstructName {
        // insertion point{{fieldToFormCodeName}}
        }
`,

    GetReverseFieldOwnerSwitch: `
    case *models.{{Structname}}:
        switch reverseField.GongstructName {
        // insertion point{{fieldToFormCode}}
        }
`,
}
Functions ¶
func CodeGeneratorGetReverseFieldOwnerName ¶
func CodeGeneratorGetReverseFieldOwnerName(
    modelPkg *models.ModelPkg,
    pkgName string,
    pkgPath string,
    pkgGoPath string)
func MultiCodeGeneratorBackRepo ¶
func MultiCodeGeneratorBackRepo(
    modelPkg *models.ModelPkg,
    pkgName string,
    pkgGoPath string,
    dirPath string)
MultiCodeGeneratorBackRepo parses modelPkg and generates the code for the back repository.
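An illustrative invocation; how the *models.ModelPkg is obtained (typically from gong's analysis of the models package) and the concrete argument values are assumptions, not values the package documents:

    func generateBackRepo(modelPkg *models.ModelPkg) {
        MultiCodeGeneratorBackRepo(
            modelPkg,
            "orm",                           // pkgName (assumed: package clause of the generated files)
            "github.com/example/project/go", // pkgGoPath (hypothetical module path)
            "./go/orm")                      // dirPath (assumed: output directory)
    }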
func RemoveTargetedLines ¶
func RemoveTargetedLines(path string, target string) error
RemoveTargetedLines removes lines between specific comment markers based on the target.
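func ReplaceInFile ¶
func ReplaceInFile(filePath string, toReplace string, replacement string) error
Both helpers post-process generated files. An illustrative sequence; the file path and the target/replacement strings below are assumptions (the Gorm and Lite constants hint at the kind of targets the package itself uses):

    // hypothetical post-processing of a generated file
    path := filepath.Join("go", "orm", "back_repo.go")

    // drop the code between the markers associated with the "lite" target (assumed target string)
    if err := RemoveTargetedLines(path, "lite"); err != nil {
        log.Fatal(err)
    }

    // rewrite a placeholder left in the file (hypothetical replacement value)
    if err := ReplaceInFile(path, "{{PkgPathRoot}}", "github.com/example/project/go"); err != nil {
        log.Fatal(err)
    }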
Types ¶
type BackRepoDataSubTemplateInsertion ¶
type BackRepoDataSubTemplateInsertion int
const (
    BackRepoDataSlice BackRepoDataSubTemplateInsertion = iota
    BackRepoDataSliceCopies
)
type BackRepoInsertionPoint ¶
type BackRepoInsertionPoint int
insertion points
const (
    BackRepoBasicFieldsDeclaration BackRepoInsertionPoint = iota
    BackRepoBasicAndTimeFieldsName
    BackRepoWOPInitialIndex
    BackRepoBasicAndTimeFieldsWOPDeclaration
    BackRepoPointerEncodingFieldsDeclaration
    BackRepoPointerEncodingFieldsWOPDeclaration
    BackRepoBasicFieldsCommit
    BackRepoPointerEncodingFieldsCommit
    BackRepoBasicFieldsCheckout
    BackRepoPointerEncodingFieldsCheckout
    BackRepoPointerEncodingFieldsReindexing
    BackRepoPointerReversePointersReseting
    BackRepoNbInsertionPoints
)
type BackRepoPerStructSubTemplate ¶
type BackRepoPerStructSubTemplate int
const (
    BackRepoDeclarationBasicField BackRepoPerStructSubTemplate = iota
    BackRepoCommitBasicField
    BackRepoCheckoutBasicField
    BackRepoDeclarationTimeField
    BackRepoCommitTimeField
    BackRepoCheckoutTimeField
    BackRepoCommitBasicFieldEnum
    BackRepoCheckoutBasicFieldEnum
    BackRepoCommitBasicFieldInt
    BackRepoCheckoutBasicFieldInt
    BackRepoCheckoutBasicFieldIntEnum
    BackRepoDeclarationBasicBooleanField
    BackRepoCommitBasicBooleanField
    BackRepoCheckoutBasicFieldBoolean
    BackRepoPointerEncoding
    BackRepoSliceOfPointersEncoding
    BackRepoCommitPointerToStructField
    BackRepoCheckoutPointerToStructStageField
    BackRepoReindexingPointerToStruct
    BackRepoCommitSliceOfPointerToStructField
    BackRepoCheckoutSliceOfPointerToStructStageField
)
type BackRepoSubTemplateInsertion ¶
type BackRepoSubTemplateInsertion int
const (
    BackRepoPerStructDeclarations BackRepoSubTemplateInsertion = iota
    BackRepoPerStructInits
    BackRepoPerStructRefToStructDB
    BackRepoPerStructPhaseOneCommits
    BackRepoPerStructPhaseTwoCommits
    BackRepoPerStructPhaseOneCheckouts
    BackRepoPerStructPhaseTwoCheckouts
    BackRepoInitAndCommit
    BackRepoInitAndCheckout
    BackRepoCommit
    BackRepoCheckout
    BackRepoBackup
    BackRepoBackupXL
    BackRepoRestorePhaseOne
    BackRepoRestoreXLPhaseOne
    BackRepoRestorePhaseTwo
)
type DBliteInsertionPointId ¶
type DBliteInsertionPointId int
const (
    DBliteMapFieldDefinition DBliteInsertionPointId = iota
    DBliteMapFieldInit
    DBliteMapFieldCreate
    DBliteMapFieldDelete
    DBliteMapFieldSave
    DBliteMapFieldUpdate
    DBliteMapFieldFind
    DBliteMapFieldFirst
)
type GetInstanceDBFromInstanceSubTemplateInsertion ¶
type GetInstanceDBFromInstanceSubTemplateInsertion int
const (
    GetInstanceDBFromInstanceSwitchCaseGetID GetInstanceDBFromInstanceSubTemplateInsertion = iota
    GetInstanceDBFromInstanceSwitchCaseGet
)
type GetReverseFieldOwnerNameId ¶
type GetReverseFieldOwnerNameId int
const (
    GetReverseFieldOwnerNameSwitch GetReverseFieldOwnerNameId = iota
    GetReverseFieldOwnerSwitch
    GetReverseFieldOwnerNameNb
)
type GetReverseFieldOwnerNameSubTemplateId ¶
type GetReverseFieldOwnerNameSubTemplateId int
const (
    GetReverseFieldOwnerNameSwitchCode GetReverseFieldOwnerNameSubTemplateId = iota
    GetReverseFieldOwnerSwitchCode
    GetReverseFieldOwnerNameMasterSwitchCodeStart
    GetReverseFieldOwnerNameMasterSwitchCodeEnd
)
Source Files ¶